author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/jit/shared
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/shared')
-rw-r--r--  js/src/jit/shared/Assembler-shared.h                       991
-rw-r--r--  js/src/jit/shared/BaselineCompiler-shared.cpp              146
-rw-r--r--  js/src/jit/shared/BaselineCompiler-shared.h                172
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared-inl.h               437
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared.cpp                1865
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared.h                   850
-rw-r--r--  js/src/jit/shared/IonAssemblerBuffer.h                     417
-rw-r--r--  js/src/jit/shared/IonAssemblerBufferWithConstantPools.h   1145
-rw-r--r--  js/src/jit/shared/LIR-shared.h                            8904
-rw-r--r--  js/src/jit/shared/LOpcodes-shared.h                        441
-rw-r--r--  js/src/jit/shared/Lowering-shared-inl.h                    858
-rw-r--r--  js/src/jit/shared/Lowering-shared.cpp                      306
-rw-r--r--  js/src/jit/shared/Lowering-shared.h                        296
13 files changed, 16828 insertions, 0 deletions
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
new file mode 100644
index 000000000..aac9687b8
--- /dev/null
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -0,0 +1,991 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Assembler_shared_h
+#define jit_shared_Assembler_shared_h
+
+#include "mozilla/PodOperations.h"
+
+#include <limits.h>
+
+#include "jit/AtomicOp.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "vm/HelperThreads.h"
+#include "wasm/WasmTypes.h"
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// Push return addresses callee-side.
+# define JS_USE_LINK_REGISTER
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+// JS_SMALL_BRANCH means the range of a branch instruction is smaller than
+// the whole address space.
+# define JS_SMALL_BRANCH
+#endif
+
+namespace js {
+namespace jit {
+
+namespace Disassembler {
+class HeapAccess;
+} // namespace Disassembler
+
+static const uint32_t Simd128DataSize = 4 * sizeof(int32_t);
+static_assert(Simd128DataSize == 4 * sizeof(int32_t), "SIMD data should be able to contain int32x4");
+static_assert(Simd128DataSize == 4 * sizeof(float), "SIMD data should be able to contain float32x4");
+static_assert(Simd128DataSize == 2 * sizeof(double), "SIMD data should be able to contain float64x2");
+
+enum Scale {
+ TimesOne = 0,
+ TimesTwo = 1,
+ TimesFour = 2,
+ TimesEight = 3
+};
+
+static_assert(sizeof(JS::Value) == 8,
+ "required for TimesEight and 3 below to be correct");
+static const Scale ValueScale = TimesEight;
+static const size_t ValueShift = 3;
+
+static inline unsigned
+ScaleToShift(Scale scale)
+{
+ return unsigned(scale);
+}
+
+static inline bool
+IsShiftInScaleRange(int i)
+{
+ return i >= TimesOne && i <= TimesEight;
+}
+
+static inline Scale
+ShiftToScale(int i)
+{
+ MOZ_ASSERT(IsShiftInScaleRange(i));
+ return Scale(i);
+}
+
+static inline Scale
+ScaleFromElemWidth(int shift)
+{
+ switch (shift) {
+ case 1:
+ return TimesOne;
+ case 2:
+ return TimesTwo;
+ case 4:
+ return TimesFour;
+ case 8:
+ return TimesEight;
+ }
+
+ MOZ_CRASH("Invalid scale");
+}
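+
+// A minimal usage sketch of the helpers above (values follow directly from
+// the Scale enum):
+//
+//   Scale s = ShiftToScale(2);         // TimesFour
+//   unsigned shift = ScaleToShift(s);  // 2, i.e. index * (1 << 2) == index * 4 bytes
+//   Scale t = ScaleFromElemWidth(8);   // TimesEight, from a byte width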
+
+// Used for 32-bit immediates which do not require relocation.
+struct Imm32
+{
+ int32_t value;
+
+ explicit Imm32(int32_t value) : value(value)
+ { }
+
+ static inline Imm32 ShiftOf(enum Scale s) {
+ switch (s) {
+ case TimesOne:
+ return Imm32(0);
+ case TimesTwo:
+ return Imm32(1);
+ case TimesFour:
+ return Imm32(2);
+ case TimesEight:
+ return Imm32(3);
+        }
+ MOZ_CRASH("Invalid scale");
+ }
+
+ static inline Imm32 FactorOf(enum Scale s) {
+ return Imm32(1 << ShiftOf(s).value);
+ }
+};
+
+// Pointer-sized integer to be embedded as an immediate in an instruction.
+struct ImmWord
+{
+ uintptr_t value;
+
+ explicit ImmWord(uintptr_t value) : value(value)
+ { }
+};
+
+// Used for 64-bit immediates which do not require relocation.
+struct Imm64
+{
+ uint64_t value;
+
+ explicit Imm64(int64_t value) : value(value)
+ { }
+
+ Imm32 low() const {
+ return Imm32(int32_t(value));
+ }
+
+ Imm32 hi() const {
+ return Imm32(int32_t(value >> 32));
+ }
+
+ inline Imm32 firstHalf() const;
+ inline Imm32 secondHalf() const;
+};
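+
+// A small sketch of how Imm64 splits (these are arithmetic halves, not a
+// statement about memory layout): for Imm64(0x1122334455667788), low() is
+// Imm32(0x55667788) and hi() is Imm32(0x11223344). firstHalf()/secondHalf()
+// are the platform-defined, endian-aware counterparts.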
+
+#ifdef DEBUG
+static inline bool
+IsCompilingWasm()
+{
+ // wasm compilation pushes a JitContext with a null JSCompartment.
+ return GetJitContext()->compartment == nullptr;
+}
+#endif
+
+// Pointer to be embedded as an immediate in an instruction.
+struct ImmPtr
+{
+ void* value;
+
+ explicit ImmPtr(const void* value) : value(const_cast<void*>(value))
+ {
+ // To make code serialization-safe, wasm compilation should only
+ // compile pointer immediates using a SymbolicAddress.
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R>
+ explicit ImmPtr(R (*pf)())
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1>
+ explicit ImmPtr(R (*pf)(A1))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2>
+ explicit ImmPtr(R (*pf)(A1, A2))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3>
+ explicit ImmPtr(R (*pf)(A1, A2, A3))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3, class A4>
+ explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+};
+
+// The same as ImmPtr except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedImmPtr {
+ void* value;
+
+ explicit PatchedImmPtr()
+ : value(nullptr)
+ { }
+ explicit PatchedImmPtr(const void* value)
+ : value(const_cast<void*>(value))
+ { }
+};
+
+class AssemblerShared;
+class ImmGCPtr;
+
+// Used for immediates which require relocation.
+class ImmGCPtr
+{
+ public:
+ const gc::Cell* value;
+
+ explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr)
+ {
+ // Nursery pointers can't be used if the main thread might be currently
+ // performing a minor GC.
+ MOZ_ASSERT_IF(ptr && !ptr->isTenured(),
+ !CurrentThreadIsIonCompilingSafeForMinorGC());
+
+ // wasm shouldn't be creating GC things
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ private:
+ ImmGCPtr() : value(0) {}
+};
+
+// Pointer to be embedded as an immediate that an instruction loads from or
+// stores to.
+struct AbsoluteAddress
+{
+ void* addr;
+
+ explicit AbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr))
+ {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ AbsoluteAddress offset(ptrdiff_t delta) {
+ return AbsoluteAddress(((uint8_t*) addr) + delta);
+ }
+};
+
+// The same as AbsoluteAddress except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedAbsoluteAddress
+{
+ void* addr;
+
+ explicit PatchedAbsoluteAddress()
+ : addr(nullptr)
+ { }
+ explicit PatchedAbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr))
+ { }
+ explicit PatchedAbsoluteAddress(uintptr_t addr)
+ : addr(reinterpret_cast<void*>(addr))
+ { }
+};
+
+// Specifies an address computed in the form of a register base and a constant,
+// 32-bit offset.
+struct Address
+{
+ Register base;
+ int32_t offset;
+
+ Address(Register base, int32_t offset) : base(base), offset(offset)
+ { }
+
+ Address() { mozilla::PodZero(this); }
+};
+
+// Specifies an address computed in the form of a register base, a register
+// index with a scale, and a constant, 32-bit offset.
+struct BaseIndex
+{
+ Register base;
+ Register index;
+ Scale scale;
+ int32_t offset;
+
+ BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
+ : base(base), index(index), scale(scale), offset(offset)
+ { }
+
+ BaseIndex() { mozilla::PodZero(this); }
+};
+
+// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
+// sizeof(Value). Use this *only* if you're indexing into a series of Values
+// that aren't object elements or object slots (for example, values on the
+// stack, values in an arguments object, &c.). If you're indexing into an
+// object's elements or slots, don't use this directly! Use
+// BaseObject{Element,Slot}Index instead.
+struct BaseValueIndex : BaseIndex
+{
+ BaseValueIndex(Register base, Register index, int32_t offset = 0)
+ : BaseIndex(base, index, ValueScale, offset)
+ { }
+};
+
+// Specifies the address of an indexed Value within object elements from a
+// base. The index must not already be scaled by sizeof(Value)!
+struct BaseObjectElementIndex : BaseValueIndex
+{
+ BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset)
+ {
+ NativeObject::elementsSizeMustNotOverflow();
+ }
+};
+
+// Like BaseObjectElementIndex, except for object slots.
+struct BaseObjectSlotIndex : BaseValueIndex
+{
+ BaseObjectSlotIndex(Register base, Register index)
+ : BaseValueIndex(base, index)
+ {
+ NativeObject::slotsSizeMustNotOverflow();
+ }
+};
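+
+// A minimal sketch of the effective addresses described by the structs above
+// (assuming the 8-byte Value asserted for ValueScale):
+//
+//   Address(base, 8)                     -> base + 8
+//   BaseIndex(base, index, TimesFour, 8) -> base + index * 4 + 8
+//   BaseValueIndex(base, index, 8)       -> base + index * 8 + 8
+//
+// i.e. BaseValueIndex scales the index by sizeof(Value) but, as documented
+// above, never scales the constant offset.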
+
+class Relocation {
+ public:
+ enum Kind {
+ // The target is immovable, so patching is only needed if the source
+ // buffer is relocated and the reference is relative.
+ HARDCODED,
+
+ // The target is the start of a JitCode buffer, which must be traced
+ // during garbage collection. Relocations and patching may be needed.
+ JITCODE
+ };
+};
+
+class RepatchLabel
+{
+ static const int32_t INVALID_OFFSET = 0xC0000000;
+ int32_t offset_ : 31;
+ uint32_t bound_ : 1;
+ public:
+
+ RepatchLabel() : offset_(INVALID_OFFSET), bound_(0) {}
+
+ void use(uint32_t newOffset) {
+ MOZ_ASSERT(offset_ == INVALID_OFFSET);
+ MOZ_ASSERT(newOffset != (uint32_t)INVALID_OFFSET);
+ offset_ = newOffset;
+ }
+ bool bound() const {
+ return bound_;
+ }
+ void bind(int32_t dest) {
+ MOZ_ASSERT(!bound_);
+ MOZ_ASSERT(dest != INVALID_OFFSET);
+ offset_ = dest;
+ bound_ = true;
+ }
+ int32_t target() {
+ MOZ_ASSERT(bound());
+ int32_t ret = offset_;
+ offset_ = INVALID_OFFSET;
+ return ret;
+ }
+ int32_t offset() {
+ MOZ_ASSERT(!bound());
+ return offset_;
+ }
+ bool used() const {
+ return !bound() && offset_ != (INVALID_OFFSET);
+ }
+
+};
+// An absolute label is like a Label, except it represents an absolute
+// reference rather than a relative one. Thus, it cannot be patched until after
+// linking.
+struct AbsoluteLabel : public LabelBase
+{
+ public:
+ AbsoluteLabel()
+ { }
+ AbsoluteLabel(const AbsoluteLabel& label) : LabelBase(label)
+ { }
+ int32_t prev() const {
+ MOZ_ASSERT(!bound());
+ if (!used())
+ return INVALID_OFFSET;
+ return offset();
+ }
+ void setPrev(int32_t offset) {
+ use(offset);
+ }
+ void bind() {
+ bound_ = true;
+
+ // These labels cannot be used after being bound.
+ offset_ = -1;
+ }
+};
+
+class CodeOffset
+{
+ size_t offset_;
+
+ static const size_t NOT_BOUND = size_t(-1);
+
+ public:
+ explicit CodeOffset(size_t offset) : offset_(offset) {}
+ CodeOffset() : offset_(NOT_BOUND) {}
+
+ size_t offset() const {
+ MOZ_ASSERT(bound());
+ return offset_;
+ }
+
+ void bind(size_t offset) {
+ MOZ_ASSERT(!bound());
+ offset_ = offset;
+ MOZ_ASSERT(bound());
+ }
+ bool bound() const {
+ return offset_ != NOT_BOUND;
+ }
+
+ void offsetBy(size_t delta) {
+ MOZ_ASSERT(bound());
+ MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
+ offset_ += delta;
+ }
+};
+
+// A code label contains an absolute reference to a point in the code. Thus, it
+// cannot be patched until after linking.
+// When the source label is resolved into a memory address, this address is
+// patched into the destination address.
+class CodeLabel
+{
+    // The destination position, where the absolute reference should be
+    // patched in.
+ CodeOffset patchAt_;
+
+    // The source label (relative) in the code whose eventual address gets
+    // patched into the destination.
+ CodeOffset target_;
+
+ public:
+ CodeLabel()
+ { }
+ explicit CodeLabel(const CodeOffset& patchAt)
+ : patchAt_(patchAt)
+ { }
+ CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
+ : patchAt_(patchAt),
+ target_(target)
+ { }
+ CodeOffset* patchAt() {
+ return &patchAt_;
+ }
+ CodeOffset* target() {
+ return &target_;
+ }
+ void offsetBy(size_t delta) {
+ patchAt_.offsetBy(delta);
+ target_.offsetBy(delta);
+ }
+};
+
+// Location of a jump or label in a generated JitCode block, relative to the
+// start of the block.
+
+class CodeOffsetJump
+{
+ size_t offset_;
+
+#ifdef JS_SMALL_BRANCH
+ size_t jumpTableIndex_;
+#endif
+
+ public:
+
+#ifdef JS_SMALL_BRANCH
+ CodeOffsetJump(size_t offset, size_t jumpTableIndex)
+ : offset_(offset), jumpTableIndex_(jumpTableIndex)
+ {}
+ size_t jumpTableIndex() const {
+ return jumpTableIndex_;
+ }
+#else
+ explicit CodeOffsetJump(size_t offset) : offset_(offset) {}
+#endif
+
+ CodeOffsetJump() {
+ mozilla::PodZero(this);
+ }
+
+ size_t offset() const {
+ return offset_;
+ }
+ void fixup(MacroAssembler* masm);
+};
+
+// Absolute location of a jump or a label in some generated JitCode block.
+// Can also encode a CodeOffset{Jump,Label}, such that the offset is initially
+// set and the absolute location later filled in after the final JitCode is
+// allocated.
+
+class CodeLocationJump
+{
+ uint8_t* raw_;
+#ifdef DEBUG
+ enum State { Uninitialized, Absolute, Relative };
+ State state_;
+ void setUninitialized() {
+ state_ = Uninitialized;
+ }
+ void setAbsolute() {
+ state_ = Absolute;
+ }
+ void setRelative() {
+ state_ = Relative;
+ }
+#else
+ void setUninitialized() const {
+ }
+ void setAbsolute() const {
+ }
+ void setRelative() const {
+ }
+#endif
+
+#ifdef JS_SMALL_BRANCH
+ uint8_t* jumpTableEntry_;
+#endif
+
+ public:
+ CodeLocationJump() {
+ raw_ = nullptr;
+ setUninitialized();
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntry_ = (uint8_t*) uintptr_t(0xdeadab1e);
+#endif
+ }
+ CodeLocationJump(JitCode* code, CodeOffsetJump base) {
+ *this = base;
+ repoint(code);
+ }
+
+ void operator = (CodeOffsetJump base) {
+ raw_ = (uint8_t*) base.offset();
+ setRelative();
+#ifdef JS_SMALL_BRANCH
+ jumpTableEntry_ = (uint8_t*) base.jumpTableIndex();
+#endif
+ }
+
+ void repoint(JitCode* code, MacroAssembler* masm = nullptr);
+
+ uint8_t* raw() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return raw_;
+ }
+ uint8_t* offset() const {
+ MOZ_ASSERT(state_ == Relative);
+ return raw_;
+ }
+
+#ifdef JS_SMALL_BRANCH
+ uint8_t* jumpTableEntry() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return jumpTableEntry_;
+ }
+#endif
+};
+
+class CodeLocationLabel
+{
+ uint8_t* raw_;
+#ifdef DEBUG
+ enum State { Uninitialized, Absolute, Relative };
+ State state_;
+ void setUninitialized() {
+ state_ = Uninitialized;
+ }
+ void setAbsolute() {
+ state_ = Absolute;
+ }
+ void setRelative() {
+ state_ = Relative;
+ }
+#else
+ void setUninitialized() const {
+ }
+ void setAbsolute() const {
+ }
+ void setRelative() const {
+ }
+#endif
+
+ public:
+ CodeLocationLabel() {
+ raw_ = nullptr;
+ setUninitialized();
+ }
+ CodeLocationLabel(JitCode* code, CodeOffset base) {
+ *this = base;
+ repoint(code);
+ }
+ explicit CodeLocationLabel(JitCode* code) {
+ raw_ = code->raw();
+ setAbsolute();
+ }
+ explicit CodeLocationLabel(uint8_t* raw) {
+ raw_ = raw;
+ setAbsolute();
+ }
+
+ void operator = (CodeOffset base) {
+ raw_ = (uint8_t*)base.offset();
+ setRelative();
+ }
+ ptrdiff_t operator - (const CodeLocationLabel& other) {
+ return raw_ - other.raw_;
+ }
+
+ void repoint(JitCode* code, MacroAssembler* masm = nullptr);
+
+#ifdef DEBUG
+ bool isSet() const {
+ return state_ != Uninitialized;
+ }
+#endif
+
+ uint8_t* raw() const {
+ MOZ_ASSERT(state_ == Absolute);
+ return raw_;
+ }
+ uint8_t* offset() const {
+ MOZ_ASSERT(state_ == Relative);
+ return raw_;
+ }
+};
+
+} // namespace jit
+
+namespace wasm {
+
+// As an invariant across architectures, within wasm code:
+// $sp % WasmStackAlignment = (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment
+// Thus, wasm::Frame represents the bytes pushed after the call (which occurred
+// with a WasmStackAlignment-aligned StackPointer) that are not included in
+// masm.framePushed.
+
+struct Frame
+{
+ // The caller's saved frame pointer. In non-profiling mode, internal
+ // wasm-to-wasm calls don't update fp and thus don't save the caller's
+ // frame pointer; the space is reserved, however, so that profiling mode can
+ // reuse the same function body without recompiling.
+ uint8_t* callerFP;
+
+ // The return address pushed by the call (in the case of ARM/MIPS the return
+ // address is pushed by the first instruction of the prologue).
+ void* returnAddress;
+};
+
+static_assert(sizeof(Frame) == 2 * sizeof(void*), "?!");
+static const uint32_t FrameBytesAfterReturnAddress = sizeof(void*);
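+
+// A worked instance of the invariant above, assuming a 64-bit target where
+// sizeof(wasm::Frame) == 16 (as asserted) and WasmStackAlignment == 16: with
+// masm.framePushed == 0, $sp % 16 must equal (16 + 0) % 16 == 0, and after
+// reserving 32 bytes of locals (framePushed == 32) it must equal 48 % 16 == 0,
+// i.e. $sp stays 16-byte aligned throughout the function body.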
+
+// Represents an instruction to be patched and the intended pointee. These
+// links are accumulated in the MacroAssembler, but patching is done outside
+// the MacroAssembler (in Module::staticallyLink).
+
+struct SymbolicAccess
+{
+ SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
+ : patchAt(patchAt), target(target) {}
+
+ jit::CodeOffset patchAt;
+ SymbolicAddress target;
+};
+
+typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
+
+// Describes a single wasm or asm.js memory access for the purpose of generating
+// code and metadata.
+
+class MemoryAccessDesc
+{
+ uint32_t offset_;
+ uint32_t align_;
+ Scalar::Type type_;
+ unsigned numSimdElems_;
+ jit::MemoryBarrierBits barrierBefore_;
+ jit::MemoryBarrierBits barrierAfter_;
+ mozilla::Maybe<wasm::TrapOffset> trapOffset_;
+
+ public:
+ explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
+ mozilla::Maybe<TrapOffset> trapOffset,
+ unsigned numSimdElems = 0,
+ jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
+ jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
+ : offset_(offset),
+ align_(align),
+ type_(type),
+ numSimdElems_(numSimdElems),
+ barrierBefore_(barrierBefore),
+ barrierAfter_(barrierAfter),
+ trapOffset_(trapOffset)
+ {
+ MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));
+ MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ MOZ_ASSERT_IF(isSimd(), hasTrap());
+ MOZ_ASSERT_IF(isAtomic(), hasTrap());
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t align() const { return align_; }
+ Scalar::Type type() const { return type_; }
+ unsigned byteSize() const {
+ return Scalar::isSimdType(type())
+ ? Scalar::scalarByteSize(type()) * numSimdElems()
+ : Scalar::byteSize(type());
+ }
+ unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
+ jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
+ jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
+ bool hasTrap() const { return !!trapOffset_; }
+ TrapOffset trapOffset() const { return *trapOffset_; }
+ bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
+ bool isSimd() const { return Scalar::isSimdType(type_); }
+ bool isUnaligned() const { return align() && align() < byteSize(); }
+ bool isPlainAsmJS() const { return !hasTrap(); }
+
+ void clearOffset() { offset_ = 0; }
+};
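+
+// A small sketch of the accessors above (byte sizes per the Scalar type): a
+// plain Scalar::Int32 access has byteSize() == 4, while a Scalar::Float32x4
+// access with numSimdElems == 4 has byteSize() == 4 * 4 == 16; isUnaligned()
+// is then true whenever a nonzero align() is smaller than that byte size.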
+
+// Summarizes a global access for a mutable (in asm.js) or immutable value (in
+// asm.js or the wasm MVP) that needs to get patched later.
+
+struct GlobalAccess
+{
+ GlobalAccess(jit::CodeOffset patchAt, unsigned globalDataOffset)
+ : patchAt(patchAt), globalDataOffset(globalDataOffset)
+ {}
+
+ jit::CodeOffset patchAt;
+ unsigned globalDataOffset;
+};
+
+typedef Vector<GlobalAccess, 0, SystemAllocPolicy> GlobalAccessVector;
+
+// The TrapDesc struct describes a wasm trap that is about to be emitted. This
+// includes the logical wasm bytecode offset to report, the kind of instruction
+// causing the trap, and the stack depth right before control is transferred to
+// the trap out-of-line path.
+
+struct TrapDesc : TrapOffset
+{
+ enum Kind { Jump, MemoryAccess };
+ Kind kind;
+ Trap trap;
+ uint32_t framePushed;
+
+ TrapDesc(TrapOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
+ : TrapOffset(offset), kind(kind), trap(trap), framePushed(framePushed)
+ {}
+};
+
+// A TrapSite captures all relevant information at the point of emitting the
+// in-line trapping instruction for the purpose of generating the out-of-line
+// trap code (at the end of the function).
+
+struct TrapSite : TrapDesc
+{
+ uint32_t codeOffset;
+
+ TrapSite(TrapDesc trap, uint32_t codeOffset)
+ : TrapDesc(trap), codeOffset(codeOffset)
+ {}
+};
+
+typedef Vector<TrapSite, 0, SystemAllocPolicy> TrapSiteVector;
+
+// A TrapFarJump records the offset of a jump that needs to be patched to a trap
+// exit at the end of the module when trap exits are emitted.
+
+struct TrapFarJump
+{
+ Trap trap;
+ jit::CodeOffset jump;
+
+ TrapFarJump(Trap trap, jit::CodeOffset jump)
+ : trap(trap), jump(jump)
+ {}
+
+ void offsetBy(size_t delta) {
+ jump.offsetBy(delta);
+ }
+};
+
+typedef Vector<TrapFarJump, 0, SystemAllocPolicy> TrapFarJumpVector;
+
+} // namespace wasm
+
+namespace jit {
+
+// The base class of all Assemblers for all archs.
+class AssemblerShared
+{
+ wasm::CallSiteAndTargetVector callSites_;
+ wasm::TrapSiteVector trapSites_;
+ wasm::TrapFarJumpVector trapFarJumps_;
+ wasm::MemoryAccessVector memoryAccesses_;
+ wasm::MemoryPatchVector memoryPatches_;
+ wasm::BoundsCheckVector boundsChecks_;
+ wasm::GlobalAccessVector globalAccesses_;
+ wasm::SymbolicAccessVector symbolicAccesses_;
+
+ protected:
+ Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;
+
+ bool enoughMemory_;
+ bool embedsNurseryPointers_;
+
+ public:
+ AssemblerShared()
+ : enoughMemory_(true),
+ embedsNurseryPointers_(false)
+ {}
+
+ void propagateOOM(bool success) {
+ enoughMemory_ &= success;
+ }
+
+ void setOOM() {
+ enoughMemory_ = false;
+ }
+
+ bool oom() const {
+ return !enoughMemory_;
+ }
+
+ bool embedsNurseryPointers() const {
+ return embedsNurseryPointers_;
+ }
+
+ template <typename... Args>
+ void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
+ Args&&... args)
+ {
+        // framePushed does not include sizeof(wasm::Frame), so add it in explicitly when
+ // setting the CallSite::stackDepth.
+ wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(wasm::Frame));
+ enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward<Args>(args)...);
+ }
+ wasm::CallSiteAndTargetVector& callSites() { return callSites_; }
+
+ void append(wasm::TrapSite trapSite) {
+ enoughMemory_ &= trapSites_.append(trapSite);
+ }
+ const wasm::TrapSiteVector& trapSites() const { return trapSites_; }
+ void clearTrapSites() { trapSites_.clear(); }
+
+ void append(wasm::TrapFarJump jmp) {
+ enoughMemory_ &= trapFarJumps_.append(jmp);
+ }
+ const wasm::TrapFarJumpVector& trapFarJumps() const { return trapFarJumps_; }
+
+ void append(wasm::MemoryAccess access) { enoughMemory_ &= memoryAccesses_.append(access); }
+ wasm::MemoryAccessVector&& extractMemoryAccesses() { return Move(memoryAccesses_); }
+
+ void append(const wasm::MemoryAccessDesc& access, size_t codeOffset, size_t framePushed) {
+ if (access.hasTrap()) {
+            // If a memory access is trapping (wasm, SIMD.js, Atomics), create a
+            // TrapSite now; the trap out-of-line path generated at the end of
+            // the function will *then* append a MemoryAccess.
+ wasm::TrapDesc trap(access.trapOffset(), wasm::Trap::OutOfBounds, framePushed,
+ wasm::TrapSite::MemoryAccess);
+ append(wasm::TrapSite(trap, codeOffset));
+ } else {
+ // Otherwise, this is a plain asm.js access. On WASM_HUGE_MEMORY
+ // platforms, asm.js uses signal handlers to remove bounds checks
+ // and thus requires a MemoryAccess.
+ MOZ_ASSERT(access.isPlainAsmJS());
+#ifdef WASM_HUGE_MEMORY
+ append(wasm::MemoryAccess(codeOffset));
+#endif
+ }
+ }
+
+ void append(wasm::MemoryPatch patch) { enoughMemory_ &= memoryPatches_.append(patch); }
+ wasm::MemoryPatchVector&& extractMemoryPatches() { return Move(memoryPatches_); }
+
+ void append(wasm::BoundsCheck check) { enoughMemory_ &= boundsChecks_.append(check); }
+ wasm::BoundsCheckVector&& extractBoundsChecks() { return Move(boundsChecks_); }
+
+ void append(wasm::GlobalAccess access) { enoughMemory_ &= globalAccesses_.append(access); }
+ const wasm::GlobalAccessVector& globalAccesses() const { return globalAccesses_; }
+
+ void append(wasm::SymbolicAccess access) { enoughMemory_ &= symbolicAccesses_.append(access); }
+ size_t numSymbolicAccesses() const { return symbolicAccesses_.length(); }
+ wasm::SymbolicAccess symbolicAccess(size_t i) const { return symbolicAccesses_[i]; }
+
+ static bool canUseInSingleByteInstruction(Register reg) { return true; }
+
+ void addCodeLabel(CodeLabel label) {
+ propagateOOM(codeLabels_.append(label));
+ }
+ size_t numCodeLabels() const {
+ return codeLabels_.length();
+ }
+ CodeLabel codeLabel(size_t i) {
+ return codeLabels_[i];
+ }
+
+    // Merge the other assembler into this one, invalidating the other, by
+    // shifting all of its offsets by delta.
+ bool asmMergeWith(size_t delta, const AssemblerShared& other) {
+ size_t i = callSites_.length();
+ enoughMemory_ &= callSites_.appendAll(other.callSites_);
+ for (; i < callSites_.length(); i++)
+ callSites_[i].offsetReturnAddressBy(delta);
+
+ MOZ_ASSERT(other.trapSites_.empty(), "should have been cleared by wasmEmitTrapOutOfLineCode");
+
+ i = trapFarJumps_.length();
+ enoughMemory_ &= trapFarJumps_.appendAll(other.trapFarJumps_);
+ for (; i < trapFarJumps_.length(); i++)
+ trapFarJumps_[i].offsetBy(delta);
+
+ i = memoryAccesses_.length();
+ enoughMemory_ &= memoryAccesses_.appendAll(other.memoryAccesses_);
+ for (; i < memoryAccesses_.length(); i++)
+ memoryAccesses_[i].offsetBy(delta);
+
+ i = memoryPatches_.length();
+ enoughMemory_ &= memoryPatches_.appendAll(other.memoryPatches_);
+ for (; i < memoryPatches_.length(); i++)
+ memoryPatches_[i].offsetBy(delta);
+
+ i = boundsChecks_.length();
+ enoughMemory_ &= boundsChecks_.appendAll(other.boundsChecks_);
+ for (; i < boundsChecks_.length(); i++)
+ boundsChecks_[i].offsetBy(delta);
+
+ i = globalAccesses_.length();
+ enoughMemory_ &= globalAccesses_.appendAll(other.globalAccesses_);
+ for (; i < globalAccesses_.length(); i++)
+ globalAccesses_[i].patchAt.offsetBy(delta);
+
+ i = symbolicAccesses_.length();
+ enoughMemory_ &= symbolicAccesses_.appendAll(other.symbolicAccesses_);
+ for (; i < symbolicAccesses_.length(); i++)
+ symbolicAccesses_[i].patchAt.offsetBy(delta);
+
+ i = codeLabels_.length();
+ enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
+ for (; i < codeLabels_.length(); i++)
+ codeLabels_[i].offsetBy(delta);
+
+ return !oom();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Assembler_shared_h */
diff --git a/js/src/jit/shared/BaselineCompiler-shared.cpp b/js/src/jit/shared/BaselineCompiler-shared.cpp
new file mode 100644
index 000000000..5342eeb3f
--- /dev/null
+++ b/js/src/jit/shared/BaselineCompiler-shared.cpp
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+#include "jit/BaselineIC.h"
+#include "jit/VMFunctions.h"
+
+#include "jsscriptinlines.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerShared::BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script)
+ : cx(cx),
+ script(script),
+ pc(script->code()),
+ ionCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, false)),
+ ionOSRCompileable_(jit::IsIonEnabled(cx) && CanIonCompileScript(cx, script, true)),
+ compileDebugInstrumentation_(script->isDebuggee()),
+ alloc_(alloc),
+ analysis_(alloc, script),
+ frame(script, masm),
+ stubSpace_(),
+ icEntries_(),
+ pcMappingEntries_(),
+ icLoadLabels_(),
+ pushedBeforeCall_(0),
+#ifdef DEBUG
+ inCall_(false),
+#endif
+ spsPushToggleOffset_(),
+ profilerEnterFrameToggleOffset_(),
+ profilerExitFrameToggleOffset_(),
+ traceLoggerToggleOffsets_(cx),
+ traceLoggerScriptTextIdOffset_()
+{ }
+
+void
+BaselineCompilerShared::prepareVMCall()
+{
+ pushedBeforeCall_ = masm.framePushed();
+#ifdef DEBUG
+ inCall_ = true;
+#endif
+
+ // Ensure everything is synced.
+ frame.syncStack(0);
+
+ // Save the frame pointer.
+ masm.Push(BaselineFrameReg);
+}
+
+bool
+BaselineCompilerShared::callVM(const VMFunction& fun, CallVMPhase phase)
+{
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(fun);
+ if (!code)
+ return false;
+
+#ifdef DEBUG
+ // Assert prepareVMCall() has been called.
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+
+ // Assert the frame does not have an override pc when we're executing JIT code.
+ {
+ Label ok;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_OVERRIDE_PC), &ok);
+ masm.assumeUnreachable("BaselineFrame shouldn't override pc when executing JIT code");
+ masm.bind(&ok);
+ }
+#endif
+
+    // Compute argument size. Note that this includes the size of the frame
+    // pointer pushed by prepareVMCall.
+ uint32_t argSize = fun.explicitStackSlots() * sizeof(void*) + sizeof(void*);
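+    // For example, on a 64-bit target a VMFunction with three explicit stack
+    // slots gives argSize = 3 * 8 + 8 = 32, the extra word being the saved
+    // frame pointer pushed by prepareVMCall.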
+
+ // Assert all arguments were pushed.
+ MOZ_ASSERT(masm.framePushed() - pushedBeforeCall_ == argSize);
+
+ Address frameSizeAddress(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize());
+ uint32_t frameVals = frame.nlocals() + frame.stackDepth();
+ uint32_t frameBaseSize = BaselineFrame::FramePointerOffset + BaselineFrame::Size();
+ uint32_t frameFullSize = frameBaseSize + (frameVals * sizeof(Value));
+ if (phase == POST_INITIALIZE) {
+ masm.store32(Imm32(frameFullSize), frameSizeAddress);
+ uint32_t descriptor = MakeFrameDescriptor(frameFullSize + argSize, JitFrame_BaselineJS,
+ ExitFrameLayout::Size());
+ masm.push(Imm32(descriptor));
+
+ } else if (phase == PRE_INITIALIZE) {
+ masm.store32(Imm32(frameBaseSize), frameSizeAddress);
+ uint32_t descriptor = MakeFrameDescriptor(frameBaseSize + argSize, JitFrame_BaselineJS,
+ ExitFrameLayout::Size());
+ masm.push(Imm32(descriptor));
+
+ } else {
+ MOZ_ASSERT(phase == CHECK_OVER_RECURSED);
+ Label afterWrite;
+ Label writePostInitialize;
+
+ // If OVER_RECURSED is set, then frame locals haven't been pushed yet.
+ masm.branchTest32(Assembler::Zero,
+ frame.addressOfFlags(),
+ Imm32(BaselineFrame::OVER_RECURSED),
+ &writePostInitialize);
+
+ masm.move32(Imm32(frameBaseSize), ICTailCallReg);
+ masm.jump(&afterWrite);
+
+ masm.bind(&writePostInitialize);
+ masm.move32(Imm32(frameFullSize), ICTailCallReg);
+
+ masm.bind(&afterWrite);
+ masm.store32(ICTailCallReg, frameSizeAddress);
+ masm.add32(Imm32(argSize), ICTailCallReg);
+ masm.makeFrameDescriptor(ICTailCallReg, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.push(ICTailCallReg);
+ }
+ MOZ_ASSERT(fun.expectTailCall == NonTailCall);
+ // Perform the call.
+ masm.call(code);
+ uint32_t callOffset = masm.currentOffset();
+ masm.pop(BaselineFrameReg);
+
+#ifdef DEBUG
+ // Assert the frame does not have an override pc when we're executing JIT code.
+ {
+ Label ok;
+ masm.branchTest32(Assembler::Zero, frame.addressOfFlags(),
+ Imm32(BaselineFrame::HAS_OVERRIDE_PC), &ok);
+ masm.assumeUnreachable("BaselineFrame shouldn't override pc after VM call");
+ masm.bind(&ok);
+ }
+#endif
+
+ // Add a fake ICEntry (without stubs), so that the return offset to
+ // pc mapping works.
+ return appendICEntry(ICEntry::Kind_CallVM, callOffset);
+}
diff --git a/js/src/jit/shared/BaselineCompiler-shared.h b/js/src/jit/shared/BaselineCompiler-shared.h
new file mode 100644
index 000000000..7d1402a9d
--- /dev/null
+++ b/js/src/jit/shared/BaselineCompiler-shared.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_BaselineCompiler_shared_h
+#define jit_shared_BaselineCompiler_shared_h
+
+#include "jit/BaselineFrameInfo.h"
+#include "jit/BaselineIC.h"
+#include "jit/BytecodeAnalysis.h"
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerShared
+{
+ protected:
+ JSContext* cx;
+ JSScript* script;
+ jsbytecode* pc;
+ MacroAssembler masm;
+ bool ionCompileable_;
+ bool ionOSRCompileable_;
+ bool compileDebugInstrumentation_;
+
+ TempAllocator& alloc_;
+ BytecodeAnalysis analysis_;
+ FrameInfo frame;
+
+ FallbackICStubSpace stubSpace_;
+ js::Vector<BaselineICEntry, 16, SystemAllocPolicy> icEntries_;
+
+ // Stores the native code offset for a bytecode pc.
+ struct PCMappingEntry
+ {
+ uint32_t pcOffset;
+ uint32_t nativeOffset;
+ PCMappingSlotInfo slotInfo;
+
+ // If set, insert a PCMappingIndexEntry before encoding the
+ // current entry.
+ bool addIndexEntry;
+ };
+
+ js::Vector<PCMappingEntry, 16, SystemAllocPolicy> pcMappingEntries_;
+
+ // Labels for the 'movWithPatch' for loading IC entry pointers in
+ // the generated IC-calling code in the main jitcode. These need
+ // to be patched with the actual icEntry offsets after the BaselineScript
+ // has been allocated.
+ struct ICLoadLabel {
+ size_t icEntry;
+ CodeOffset label;
+ };
+ js::Vector<ICLoadLabel, 16, SystemAllocPolicy> icLoadLabels_;
+
+ uint32_t pushedBeforeCall_;
+#ifdef DEBUG
+ bool inCall_;
+#endif
+
+ CodeOffset spsPushToggleOffset_;
+ CodeOffset profilerEnterFrameToggleOffset_;
+ CodeOffset profilerExitFrameToggleOffset_;
+
+ Vector<CodeOffset> traceLoggerToggleOffsets_;
+ CodeOffset traceLoggerScriptTextIdOffset_;
+
+ BaselineCompilerShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
+
+ BaselineICEntry* allocateICEntry(ICStub* stub, ICEntry::Kind kind) {
+ if (!stub)
+ return nullptr;
+
+ // Create the entry and add it to the vector.
+ if (!icEntries_.append(BaselineICEntry(script->pcToOffset(pc), kind))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ BaselineICEntry& vecEntry = icEntries_.back();
+
+ // Set the first stub for the IC entry to the fallback stub
+ vecEntry.setFirstStub(stub);
+
+ // Return pointer to the IC entry
+ return &vecEntry;
+ }
+
+ // Append an ICEntry without a stub.
+ bool appendICEntry(ICEntry::Kind kind, uint32_t returnOffset) {
+ BaselineICEntry entry(script->pcToOffset(pc), kind);
+ entry.setReturnOffset(CodeOffset(returnOffset));
+ if (!icEntries_.append(entry)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ bool addICLoadLabel(CodeOffset label) {
+ MOZ_ASSERT(!icEntries_.empty());
+ ICLoadLabel loadLabel;
+ loadLabel.label = label;
+ loadLabel.icEntry = icEntries_.length() - 1;
+ if (!icLoadLabels_.append(loadLabel)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ JSFunction* function() const {
+ // Not delazifying here is ok as the function is guaranteed to have
+ // been delazified before compilation started.
+ return script->functionNonDelazifying();
+ }
+
+ ModuleObject* module() const {
+ return script->module();
+ }
+
+ PCMappingSlotInfo getStackTopSlotInfo() {
+ MOZ_ASSERT(frame.numUnsyncedSlots() <= 2);
+ switch (frame.numUnsyncedSlots()) {
+ case 0:
+ return PCMappingSlotInfo::MakeSlotInfo();
+ case 1:
+ return PCMappingSlotInfo::MakeSlotInfo(PCMappingSlotInfo::ToSlotLocation(frame.peek(-1)));
+ case 2:
+ default:
+ return PCMappingSlotInfo::MakeSlotInfo(PCMappingSlotInfo::ToSlotLocation(frame.peek(-1)),
+ PCMappingSlotInfo::ToSlotLocation(frame.peek(-2)));
+ }
+ }
+
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+ }
+ void prepareVMCall();
+
+ enum CallVMPhase {
+ POST_INITIALIZE,
+ PRE_INITIALIZE,
+ CHECK_OVER_RECURSED
+ };
+ bool callVM(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE);
+
+ bool callVMNonOp(const VMFunction& fun, CallVMPhase phase=POST_INITIALIZE) {
+ if (!callVM(fun, phase))
+ return false;
+ icEntries_.back().setFakeKind(ICEntry::Kind_NonOpCallVM);
+ return true;
+ }
+
+ public:
+ BytecodeAnalysis& analysis() {
+ return analysis_;
+ }
+
+ void setCompileDebugInstrumentation() {
+ compileDebugInstrumentation_ = true;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_BaselineCompiler_shared_h */
diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
new file mode 100644
index 000000000..662e2fa5d
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -0,0 +1,437 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_inl_h
+#define jit_shared_CodeGenerator_shared_inl_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+#include "jit/Disassembler.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+static inline bool
+IsConstant(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue())
+ return true;
+ if (a.high().isConstantIndex())
+ return true;
+#else
+ if (a.value().isConstantValue())
+ return true;
+ if (a.value().isConstantIndex())
+ return true;
+#endif
+ return false;
+}
+
+static inline int32_t
+ToInt32(const LAllocation* a)
+{
+ if (a->isConstantValue())
+ return a->toConstant()->toInt32();
+ if (a->isConstantIndex())
+ return a->toConstantIndex()->index();
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t
+ToInt64(const LAllocation* a)
+{
+ if (a->isConstantValue())
+ return a->toConstant()->toInt64();
+ if (a->isConstantIndex())
+ return a->toConstantIndex()->index();
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t
+ToInt64(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue())
+ return a.high().toConstant()->toInt64();
+ if (a.high().isConstantIndex())
+ return a.high().toConstantIndex()->index();
+#else
+ if (a.value().isConstantValue())
+ return a.value().toConstant()->toInt64();
+ if (a.value().isConstantIndex())
+ return a.value().toConstantIndex()->index();
+#endif
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline double
+ToDouble(const LAllocation* a)
+{
+ return a->toConstant()->numberToDouble();
+}
+
+static inline Register
+ToRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isGeneralReg());
+ return a.toGeneralReg()->reg();
+}
+
+static inline Register
+ToRegister(const LAllocation* a)
+{
+ return ToRegister(*a);
+}
+
+static inline Register
+ToRegister(const LDefinition* def)
+{
+ return ToRegister(*def->output());
+}
+
+static inline Register64
+ToOutRegister64(LInstruction* ins)
+{
+#if JS_BITS_PER_WORD == 32
+ Register loReg = ToRegister(ins->getDef(INT64LOW_INDEX));
+ Register hiReg = ToRegister(ins->getDef(INT64HIGH_INDEX));
+ return Register64(hiReg, loReg);
+#else
+ return Register64(ToRegister(ins->getDef(0)));
+#endif
+}
+
+static inline Register64
+ToRegister64(const LInt64Allocation& a)
+{
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.high()), ToRegister(a.low()));
+#else
+ return Register64(ToRegister(a.value()));
+#endif
+}
+
+static inline Register
+ToTempRegisterOrInvalid(const LDefinition* def)
+{
+ if (def->isBogusTemp())
+ return InvalidReg;
+ return ToRegister(def);
+}
+
+static inline Register
+ToTempUnboxRegister(const LDefinition* def)
+{
+ return ToTempRegisterOrInvalid(def);
+}
+
+static inline Register
+ToRegisterOrInvalid(const LDefinition* a)
+{
+ return a ? ToRegister(a) : InvalidReg;
+}
+
+static inline FloatRegister
+ToFloatRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isFloatReg());
+ return a.toFloatReg()->reg();
+}
+
+static inline FloatRegister
+ToFloatRegister(const LAllocation* a)
+{
+ return ToFloatRegister(*a);
+}
+
+static inline FloatRegister
+ToFloatRegister(const LDefinition* def)
+{
+ return ToFloatRegister(*def->output());
+}
+
+static inline FloatRegister
+ToTempFloatRegisterOrInvalid(const LDefinition* def)
+{
+ if (def->isBogusTemp())
+ return InvalidFloatReg;
+ return ToFloatRegister(def);
+}
+
+static inline AnyRegister
+ToAnyRegister(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isGeneralReg() || a.isFloatReg());
+ if (a.isGeneralReg())
+ return AnyRegister(ToRegister(a));
+ return AnyRegister(ToFloatRegister(a));
+}
+
+static inline AnyRegister
+ToAnyRegister(const LAllocation* a)
+{
+ return ToAnyRegister(*a);
+}
+
+static inline AnyRegister
+ToAnyRegister(const LDefinition* def)
+{
+ return ToAnyRegister(def->output());
+}
+
+static inline RegisterOrInt32Constant
+ToRegisterOrInt32Constant(const LAllocation* a)
+{
+ if (a->isConstant())
+ return RegisterOrInt32Constant(ToInt32(a));
+ return RegisterOrInt32Constant(ToRegister(a));
+}
+
+static inline ValueOperand
+GetValueOutput(LInstruction* ins)
+{
+#if defined(JS_NUNBOX32)
+ return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
+ ToRegister(ins->getDef(PAYLOAD_INDEX)));
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(ToRegister(ins->getDef(0)));
+#else
+#error "Unknown"
+#endif
+}
+
+static inline ValueOperand
+GetTempValue(Register type, Register payload)
+{
+#if defined(JS_NUNBOX32)
+ return ValueOperand(type, payload);
+#elif defined(JS_PUNBOX64)
+ (void)type;
+ return ValueOperand(payload);
+#else
+#error "Unknown"
+#endif
+}
+
+int32_t
+CodeGeneratorShared::ArgToStackOffset(int32_t slot) const
+{
+ return masm.framePushed() +
+ (gen->compilingWasm() ? sizeof(wasm::Frame) : sizeof(JitFrameLayout)) +
+ slot;
+}
+
+int32_t
+CodeGeneratorShared::CalleeStackOffset() const
+{
+ return masm.framePushed() + JitFrameLayout::offsetOfCalleeToken();
+}
+
+int32_t
+CodeGeneratorShared::SlotToStackOffset(int32_t slot) const
+{
+ MOZ_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
+ int32_t offset = masm.framePushed() - frameInitialAdjustment_ - slot;
+ MOZ_ASSERT(offset >= 0);
+ return offset;
+}
+
+int32_t
+CodeGeneratorShared::StackOffsetToSlot(int32_t offset) const
+{
+ // See: SlotToStackOffset. This is used to convert pushed arguments
+ // to a slot index that safepoints can use.
+ //
+ // offset = framePushed - frameInitialAdjustment - slot
+ // offset + slot = framePushed - frameInitialAdjustment
+    //   slot = framePushed - frameInitialAdjustment - offset
+ return masm.framePushed() - frameInitialAdjustment_ - offset;
+}
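+
+// For example, with masm.framePushed() == 64 and frameInitialAdjustment_ == 0,
+// SlotToStackOffset(8) == 56 and StackOffsetToSlot(56) == 8; for a fixed frame
+// the two functions are inverses of each other.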
+
+// For argument construction for calls. Argslots are Value-sized.
+int32_t
+CodeGeneratorShared::StackOffsetOfPassedArg(int32_t slot) const
+{
+ // A slot of 0 is permitted only to calculate %esp offset for calls.
+ MOZ_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
+ int32_t offset = masm.framePushed() -
+ graph.paddedLocalSlotsSize() -
+ (slot * sizeof(Value));
+
+    // Passed arguments go below a function's local stack storage.
+    // When arguments are being pushed, there is nothing important on the stack.
+    // Therefore, it is safe to push the arguments down arbitrarily. Pushing
+ // by sizeof(Value) is desirable since everything on the stack is a Value.
+ // Note that paddedLocalSlotCount() aligns to at least a Value boundary
+ // specifically to support this.
+ MOZ_ASSERT(offset >= 0);
+ MOZ_ASSERT(offset % sizeof(Value) == 0);
+ return offset;
+}
+
+int32_t
+CodeGeneratorShared::ToStackOffset(LAllocation a) const
+{
+ if (a.isArgument())
+ return ArgToStackOffset(a.toArgument()->index());
+ return SlotToStackOffset(a.toStackSlot()->slot());
+}
+
+int32_t
+CodeGeneratorShared::ToStackOffset(const LAllocation* a) const
+{
+ return ToStackOffset(*a);
+}
+
+Address
+CodeGeneratorShared::ToAddress(const LAllocation& a)
+{
+ MOZ_ASSERT(a.isMemory());
+ return Address(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Address
+CodeGeneratorShared::ToAddress(const LAllocation* a)
+{
+ return ToAddress(*a);
+}
+
+void
+CodeGeneratorShared::saveLive(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PushRegsInMask(safepoint->liveRegs());
+}
+
+void
+CodeGeneratorShared::restoreLive(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMask(safepoint->liveRegs());
+}
+
+void
+CodeGeneratorShared::restoreLiveIgnore(LInstruction* ins, LiveRegisterSet ignore)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMaskIgnore(safepoint->liveRegs(), ignore);
+}
+
+void
+CodeGeneratorShared::saveLiveVolatile(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
+ masm.PushRegsInMask(regs);
+}
+
+void
+CodeGeneratorShared::restoreLiveVolatile(LInstruction* ins)
+{
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(), RegisterSet::Volatile());
+ masm.PopRegsInMask(regs);
+}
+
+void
+CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
+ Scalar::Type type, Operand mem, LAllocation alloc)
+{
+#ifdef DEBUG
+ using namespace Disassembler;
+
+ Disassembler::HeapAccess::Kind kind = isLoad ? HeapAccess::Load : HeapAccess::Store;
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ if (kind == HeapAccess::Load)
+ kind = HeapAccess::LoadSext32;
+ break;
+ default:
+ break;
+ }
+
+ OtherOperand op;
+ switch (type) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (!alloc.isConstant()) {
+ op = OtherOperand(ToRegister(alloc).encoding());
+ } else {
+                // x86 doesn't allow encoding an imm64-to-memory move; the value
+                // is wrapped anyway.
+ int32_t i = ToInt32(&alloc);
+
+ // Sign-extend the immediate value out to 32 bits. We do this even
+ // for unsigned element types so that we match what the disassembly
+ // code does, as it doesn't know about signedness of stores.
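+                // For example, for Scalar::Int8 the shift below is 24, so an
+                // immediate of 0xFF becomes (0xFF << 24) >> 24 == -1.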
+ unsigned shift = 32 - TypedArrayElemSize(type) * 8;
+ i = i << shift >> shift;
+ op = OtherOperand(i);
+ }
+ break;
+ case Scalar::Int64:
+ // Can't encode an imm64-to-memory move.
+ op = OtherOperand(ToRegister(alloc).encoding());
+ break;
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Float32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::Int32x4:
+ op = OtherOperand(ToFloatRegister(alloc).encoding());
+ break;
+ case Scalar::Uint8Clamped:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("Unexpected array type");
+ }
+
+ HeapAccess access(kind, TypedArrayElemSize(type), ComplexAddress(mem), op);
+ masm.verifyHeapAccessDisassembly(begin, end, access);
+#endif
+}
+
+void
+CodeGeneratorShared::verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc)
+{
+ verifyHeapAccessDisassembly(begin, end, true, type, mem, alloc);
+}
+
+void
+CodeGeneratorShared::verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc)
+{
+ verifyHeapAccessDisassembly(begin, end, false, type, mem, alloc);
+}
+
+inline bool
+CodeGeneratorShared::isGlobalObject(JSObject* object)
+{
+    // Calling object->is<GlobalObject>() is racy because it relies on
+    // checking the group, which can be changed while we are compiling off
+    // the main thread.
+ return object == gen->compartment->maybeGlobal();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_inl_h */
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
new file mode 100644
index 000000000..ba5d9d2f5
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -0,0 +1,1865 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCaches.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/OptimizationTracking.h"
+#include "js/Conversions.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/JitFrames-inl.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+MacroAssembler&
+CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg)
+{
+ if (masmArg)
+ return *masmArg;
+ maybeMasm_.emplace();
+ return *maybeMasm_;
+}
+
+CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masmArg)
+ : maybeMasm_(),
+ masm(ensureMasm(masmArg)),
+ gen(gen),
+ graph(*graph),
+ current(nullptr),
+ snapshots_(),
+ recovers_(),
+ deoptTable_(nullptr),
+#ifdef DEBUG
+ pushedArgs_(0),
+#endif
+ lastOsiPointOffset_(0),
+ safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)),
+ returnLabel_(),
+ stubSpace_(),
+ nativeToBytecodeMap_(nullptr),
+ nativeToBytecodeMapSize_(0),
+ nativeToBytecodeTableOffset_(0),
+ nativeToBytecodeNumRegions_(0),
+ nativeToBytecodeScriptList_(nullptr),
+ nativeToBytecodeScriptListLength_(0),
+ trackedOptimizationsMap_(nullptr),
+ trackedOptimizationsMapSize_(0),
+ trackedOptimizationsRegionTableOffset_(0),
+ trackedOptimizationsTypesTableOffset_(0),
+ trackedOptimizationsAttemptsTableOffset_(0),
+ osrEntryOffset_(0),
+ skipArgCheckEntryOffset_(0),
+#ifdef CHECK_OSIPOINT_REGISTERS
+ checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
+#endif
+ frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
+ frameInitialAdjustment_(0)
+{
+ if (gen->isProfilerInstrumentationEnabled())
+ masm.enableProfilingInstrumentation();
+
+ if (gen->compilingWasm()) {
+ // Since wasm uses the system ABI which does not necessarily use a
+ // regular array where all slots are sizeof(Value), it maintains the max
+ // argument stack depth separately.
+ MOZ_ASSERT(graph->argumentSlotCount() == 0);
+ frameDepth_ += gen->wasmMaxStackArgBytes();
+
+ if (gen->usesSimd()) {
+ // If the function uses any SIMD then we may need to insert padding
+ // so that local slots are aligned for SIMD.
+ frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame),
+ WasmStackAlignment);
+ frameDepth_ += frameInitialAdjustment_;
+ // Keep the stack aligned. Some SIMD sequences build values on the
+ // stack and need the stack aligned.
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ } else if (gen->performsCall()) {
+            // An MWasmCall does not align the stack pointer at call sites but
+ // instead relies on the a priori stack adjustment. This must be the
+ // last adjustment of frameDepth_.
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ }
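+
+        // In the branches above, ComputeByteAlignment(n, WasmStackAlignment)
+        // returns the padding needed to round n up to the next multiple of the
+        // alignment; e.g. with a 16-byte WasmStackAlignment and
+        // sizeof(wasm::Frame) + frameDepth_ == 24, it adds 8 bytes.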
+
+ // FrameSizeClass is only used for bailing, which cannot happen in
+ // wasm code.
+ frameClass_ = FrameSizeClass::None();
+ } else {
+ frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
+ }
+}
+
+bool
+CodeGeneratorShared::generatePrologue()
+{
+ MOZ_ASSERT(masm.framePushed() == 0);
+ MOZ_ASSERT(!gen->compilingWasm());
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled())
+ masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameSize());
+ masm.checkStackAlignment();
+
+ emitTracelogIonStart();
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateEpilogue()
+{
+ MOZ_ASSERT(!gen->compilingWasm());
+ masm.bind(&returnLabel_);
+
+ emitTracelogIonStop();
+
+ masm.freeStack(frameSize());
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ // If profiling, reset the per-thread global lastJitFrame to point to
+ // the previous frame.
+ if (isProfilerInstrumentationEnabled())
+ masm.profilerExitFrame();
+
+ masm.ret();
+
+ // On systems that use a constant pool, this is a good time to emit.
+ masm.flushBuffer();
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateOutOfLineCode()
+{
+ for (size_t i = 0; i < outOfLineCode_.length(); i++) {
+ // Add native => bytecode mapping entries for OOL sites.
+ // Not enabled on wasm yet since it doesn't contain bytecode mappings.
+ if (!gen->compilingWasm()) {
+ if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
+ return false;
+ }
+
+ if (!gen->alloc().ensureBallast())
+ return false;
+
+ JitSpew(JitSpew_Codegen, "# Emitting out of line code");
+
+ masm.setFramePushed(outOfLineCode_[i]->framePushed());
+ lastPC_ = outOfLineCode_[i]->pc();
+ outOfLineCode_[i]->bind(&masm);
+
+ outOfLineCode_[i]->generate(this);
+ }
+
+ return !masm.oom();
+}
+
+void
+CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir)
+{
+ MOZ_ASSERT(mir);
+ addOutOfLineCode(code, mir->trackedSite());
+}
+
+void
+CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site)
+{
+ code->setFramePushed(masm.framePushed());
+ code->setBytecodeSite(site);
+ MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
+ masm.propagateOOM(outOfLineCode_.append(code));
+}
+
+bool
+CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site)
+{
+ // Skip the table entirely if profiling is not enabled.
+ if (!isProfilerInstrumentationEnabled())
+ return true;
+
+    // Fail early if the last added instruction caused the macro assembler to
+    // run out of memory, as the continuity assumptions below do not hold.
+ if (masm.oom())
+ return false;
+
+ MOZ_ASSERT(site);
+ MOZ_ASSERT(site->tree());
+ MOZ_ASSERT(site->pc());
+
+ InlineScriptTree* tree = site->tree();
+ jsbytecode* pc = site->pc();
+ uint32_t nativeOffset = masm.currentOffset();
+
+ MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
+
+ if (!nativeToBytecodeList_.empty()) {
+ size_t lastIdx = nativeToBytecodeList_.length() - 1;
+ NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
+
+ MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
+
+ // If the new entry is for the same inlineScriptTree and same
+ // bytecodeOffset, but the nativeOffset has changed, do nothing.
+ // The same site just generated some more code.
+ if (lastEntry.tree == tree && lastEntry.pc == pc) {
+ JitSpew(JitSpew_Profiling, " => In-place update [%" PRIuSIZE "-%" PRIu32 "]",
+ lastEntry.nativeOffset.offset(), nativeOffset);
+ return true;
+ }
+
+ // If the new entry is for the same native offset, then update the
+ // previous entry with the new bytecode site, since the previous
+ // bytecode site did not generate any native code.
+ if (lastEntry.nativeOffset.offset() == nativeOffset) {
+ lastEntry.tree = tree;
+ lastEntry.pc = pc;
+ JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
+
+ // This overwrite might have made the entry merge-able with a
+ // previous one. If so, merge it.
+ if (lastIdx > 0) {
+ NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
+ if (nextToLastEntry.tree == lastEntry.tree && nextToLastEntry.pc == lastEntry.pc) {
+ JitSpew(JitSpew_Profiling, " => Merging with previous region");
+ nativeToBytecodeList_.erase(&lastEntry);
+ }
+ }
+
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+ }
+ }
+
+ // Otherwise, some native code was generated for the previous bytecode site.
+ // Add a new entry for code that is about to be generated.
+ NativeToBytecode entry;
+ entry.nativeOffset = CodeOffset(nativeOffset);
+ entry.tree = tree;
+ entry.pc = pc;
+ if (!nativeToBytecodeList_.append(entry))
+ return false;
+
+ JitSpew(JitSpew_Profiling, " => Push new entry.");
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+}
+
+void
+CodeGeneratorShared::dumpNativeToBytecodeEntries()
+{
+#ifdef JS_JITSPEW
+ InlineScriptTree* topTree = gen->info().inlineScriptTree();
+ JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%" PRIuSIZE "\n",
+ topTree->script()->filename(), topTree->script()->lineno());
+ for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
+ dumpNativeToBytecodeEntry(i);
+#endif
+}
+
+void
+CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
+{
+#ifdef JS_JITSPEW
+ NativeToBytecode& ref = nativeToBytecodeList_[idx];
+ InlineScriptTree* tree = ref.tree;
+ JSScript* script = tree->script();
+ uint32_t nativeOffset = ref.nativeOffset.offset();
+ unsigned nativeDelta = 0;
+ unsigned pcDelta = 0;
+ if (idx + 1 < nativeToBytecodeList_.length()) {
+ NativeToBytecode* nextRef = &ref + 1;
+ nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
+ if (nextRef->tree == ref.tree)
+ pcDelta = nextRef->pc - ref.pc;
+ }
+ JitSpewStart(JitSpew_Profiling, " %08" PRIxSIZE " [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%" PRIuSIZE,
+ ref.nativeOffset.offset(),
+ nativeDelta,
+ (long) (ref.pc - script->code()),
+ pcDelta,
+ CodeName[JSOp(*ref.pc)],
+ script->filename(), script->lineno());
+
+ for (tree = tree->caller(); tree; tree = tree->caller()) {
+ JitSpewCont(JitSpew_Profiling, " <= %s:%" PRIuSIZE, tree->script()->filename(),
+ tree->script()->lineno());
+ }
+ JitSpewCont(JitSpew_Profiling, ")");
+ JitSpewFin(JitSpew_Profiling);
+#endif
+}
+
+bool
+CodeGeneratorShared::addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
+{
+ if (!isOptimizationTrackingEnabled())
+ return true;
+
+ MOZ_ASSERT(optimizations);
+
+ uint32_t nativeOffset = masm.currentOffset();
+
+ if (!trackedOptimizations_.empty()) {
+ NativeToTrackedOptimizations& lastEntry = trackedOptimizations_.back();
+ MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= lastEntry.endOffset.offset());
+
+ // If we're still generating code for the same set of optimizations,
+ // we are done.
+ if (lastEntry.optimizations == optimizations)
+ return true;
+ }
+
+ // If we're generating code for a new set of optimizations, add a new
+ // entry.
+ NativeToTrackedOptimizations entry;
+ entry.startOffset = CodeOffset(nativeOffset);
+ entry.endOffset = CodeOffset(nativeOffset);
+ entry.optimizations = optimizations;
+ return trackedOptimizations_.append(entry);
+}
+
+void
+CodeGeneratorShared::extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
+{
+ if (!isOptimizationTrackingEnabled())
+ return;
+
+ uint32_t nativeOffset = masm.currentOffset();
+ NativeToTrackedOptimizations& entry = trackedOptimizations_.back();
+ MOZ_ASSERT(entry.optimizations == optimizations);
+ MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= entry.endOffset.offset());
+
+ entry.endOffset = CodeOffset(nativeOffset);
+
+ // If we generated no code, remove the last entry.
+ if (nativeOffset == entry.startOffset.offset())
+ trackedOptimizations_.popBack();
+}
+
+// see OffsetOfFrameSlot
+static inline int32_t
+ToStackIndex(LAllocation* a)
+{
+ if (a->isStackSlot()) {
+ MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
+ return a->toStackSlot()->slot();
+ }
+ return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
+}
+
+void
+CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
+ uint32_t* allocIndex)
+{
+ if (mir->isBox())
+ mir = mir->toBox()->getOperand(0);
+
+ MIRType type =
+ mir->isRecoveredOnBailout() ? MIRType::None :
+ mir->isUnused() ? MIRType::MagicOptimizedOut :
+ mir->type();
+
+ RValueAllocation alloc;
+
+ switch (type) {
+ case MIRType::None:
+ {
+ MOZ_ASSERT(mir->isRecoveredOnBailout());
+ uint32_t index = 0;
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ MNode** it = recoverInfo->begin();
+ MNode** end = recoverInfo->end();
+ while (it != end && mir != *it) {
+ ++it;
+ ++index;
+ }
+
+ // This MDefinition is recovered, thus it should be listed in the
+ // LRecoverInfo.
+ MOZ_ASSERT(it != end && mir == *it);
+
+        // A lambda should have a default value readable when iterating over
+        // the inner frames.
+ if (mir->isLambda()) {
+ MConstant* constant = mir->toLambda()->functionOperand();
+ uint32_t cstIndex;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &cstIndex));
+ alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
+ break;
+ }
+
+ alloc = RValueAllocation::RecoverInstruction(index);
+ break;
+ }
+ case MIRType::Undefined:
+ alloc = RValueAllocation::Undefined();
+ break;
+ case MIRType::Null:
+ alloc = RValueAllocation::Null();
+ break;
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ case MIRType::Boolean:
+ case MIRType::Double:
+ {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ JSValueType valueType =
+ (type == MIRType::ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
+
+ MOZ_ASSERT(payload->isMemory() || payload->isRegister());
+ if (payload->isMemory())
+ alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
+ else if (payload->isGeneralReg())
+ alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
+ else if (payload->isFloatReg())
+ alloc = RValueAllocation::Double(ToFloatRegister(payload));
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Float32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
+ if (payload->isFloatReg())
+ alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
+ else
+ alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
+ break;
+ }
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing:
+ {
+ uint32_t index;
+ JSWhyMagic why = JS_GENERIC_MAGIC;
+ switch (type) {
+ case MIRType::MagicOptimizedArguments:
+ why = JS_OPTIMIZED_ARGUMENTS;
+ break;
+ case MIRType::MagicOptimizedOut:
+ why = JS_OPTIMIZED_OUT;
+ break;
+ case MIRType::MagicUninitializedLexical:
+ why = JS_UNINITIALIZED_LEXICAL;
+ break;
+ case MIRType::MagicIsConstructing:
+ why = JS_IS_CONSTRUCTING;
+ break;
+ default:
+ MOZ_CRASH("Invalid Magic MIRType");
+ }
+
+ Value v = MagicValue(why);
+ masm.propagateOOM(graph.addConstantToPool(v, &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+ default:
+ {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+#ifdef JS_NUNBOX32
+ LAllocation* type = snapshot->typeOfSlot(*allocIndex);
+ if (type->isRegister()) {
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
+ } else {
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
+ }
+#elif JS_PUNBOX64
+ if (payload->isRegister())
+ alloc = RValueAllocation::Untyped(ToRegister(payload));
+ else
+ alloc = RValueAllocation::Untyped(ToStackIndex(payload));
+#endif
+ break;
+ }
+ }
+
+    // This sets an extra bit as part of the RValueAllocation, such that we
+    // know that the recover instruction has to be executed without wrapping
+    // the instruction in a no-op recover instruction.
+ if (mir->isIncompleteObject())
+ alloc.setNeedSideEffect();
+
+ masm.propagateOOM(snapshots_.add(alloc));
+
+ *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
+}
+
+void
+CodeGeneratorShared::encode(LRecoverInfo* recover)
+{
+ if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
+ return;
+
+ uint32_t numInstructions = recover->numInstructions();
+ JitSpew(JitSpew_IonSnapshots, "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
+ (void*)recover, recover->mir()->frameCount(), numInstructions);
+
+ MResumePoint::Mode mode = recover->mir()->mode();
+ MOZ_ASSERT(mode != MResumePoint::Outer);
+ bool resumeAfter = (mode == MResumePoint::ResumeAfter);
+
+ RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
+
+ for (MNode* insn : *recover)
+ recovers_.writeInstruction(insn);
+
+ recovers_.endRecover();
+ recover->setRecoverOffset(offset);
+ masm.propagateOOM(!recovers_.oom());
+}
+
+void
+CodeGeneratorShared::encode(LSnapshot* snapshot)
+{
+ if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
+ return;
+
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ encode(recoverInfo);
+
+ RecoverOffset recoverOffset = recoverInfo->recoverOffset();
+ MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
+
+ JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
+ (void*)snapshot, (void*) recoverInfo);
+
+ SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
+
+#ifdef TRACK_SNAPSHOTS
+ uint32_t pcOpcode = 0;
+ uint32_t lirOpcode = 0;
+ uint32_t lirId = 0;
+ uint32_t mirOpcode = 0;
+ uint32_t mirId = 0;
+
+ if (LNode* ins = instruction()) {
+ lirOpcode = ins->op();
+ lirId = ins->id();
+ if (ins->mirRaw()) {
+ mirOpcode = ins->mirRaw()->op();
+ mirId = ins->mirRaw()->id();
+ if (ins->mirRaw()->trackedPc())
+ pcOpcode = *ins->mirRaw()->trackedPc();
+ }
+ }
+ snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
+#endif
+
+ uint32_t allocIndex = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
+ encodeAllocation(snapshot, *it, &allocIndex);
+ MOZ_ASSERT_IF(!snapshots_.oom(), allocWritten + 1 == snapshots_.allocWritten());
+ }
+
+ MOZ_ASSERT(allocIndex == snapshot->numSlots());
+ snapshots_.endSnapshot();
+ snapshot->setSnapshotOffset(offset);
+ masm.propagateOOM(!snapshots_.oom());
+}
+
+bool
+CodeGeneratorShared::assignBailoutId(LSnapshot* snapshot)
+{
+ MOZ_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);
+
+    // Are bailout tables entirely unavailable?
+ if (!deoptTable_)
+ return false;
+
+ MOZ_ASSERT(frameClass_ != FrameSizeClass::None());
+
+ if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
+ return true;
+
+ // Is the bailout table full?
+ if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
+ return false;
+
+ unsigned bailoutId = bailouts_.length();
+ snapshot->setBailoutId(bailoutId);
+ JitSpew(JitSpew_IonSnapshots, "Assigned snapshot bailout id %u", bailoutId);
+ masm.propagateOOM(bailouts_.append(snapshot->snapshotOffset()));
+ return true;
+}
+
+bool
+CodeGeneratorShared::encodeSafepoints()
+{
+ for (SafepointIndex& index : safepointIndices_) {
+ LSafepoint* safepoint = index.safepoint();
+
+ if (!safepoint->encoded())
+ safepoints_.encode(safepoint);
+
+ index.resolve();
+ }
+
+ return !safepoints_.oom();
+}
+
+bool
+CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx)
+{
+ js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
+ InlineScriptTree* tree = gen->info().inlineScriptTree();
+ for (;;) {
+ // Add script from current tree.
+ bool found = false;
+ for (uint32_t i = 0; i < scriptList.length(); i++) {
+ if (scriptList[i] == tree->script()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ if (!scriptList.append(tree->script()))
+ return false;
+ }
+
+        // Process the rest of the tree.
+
+        // If children exist, visit them first.
+ if (tree->hasChildren()) {
+ tree = tree->firstChild();
+ continue;
+ }
+
+ // Otherwise, find the first tree up the chain (including this one)
+ // that contains a next sibling.
+ while (!tree->hasNextCallee() && tree->hasCaller())
+ tree = tree->caller();
+
+ // If we found a sibling, use it.
+ if (tree->hasNextCallee()) {
+ tree = tree->nextCallee();
+ continue;
+ }
+
+ // Otherwise, we must have reached the top without finding any siblings.
+ MOZ_ASSERT(tree->isOutermostCaller());
+ break;
+ }
+
+ // Allocate array for list.
+ JSScript** data = cx->runtime()->pod_malloc<JSScript*>(scriptList.length());
+ if (!data)
+ return false;
+
+ for (uint32_t i = 0; i < scriptList.length(); i++)
+ data[i] = scriptList[i];
+
+ // Success.
+ nativeToBytecodeScriptListLength_ = scriptList.length();
+ nativeToBytecodeScriptList_ = data;
+ return true;
+}
+
+bool
+CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code)
+{
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ == 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ == 0);
+
+ if (!createNativeToBytecodeScriptList(cx))
+ return false;
+
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+
+ CompactBufferWriter writer;
+ uint32_t tableOffset = 0;
+ uint32_t numRegions = 0;
+
+ if (!JitcodeIonTable::WriteIonTable(
+ writer, nativeToBytecodeScriptList_, nativeToBytecodeScriptListLength_,
+ &nativeToBytecodeList_[0],
+ &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
+ &tableOffset, &numRegions))
+ {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ MOZ_ASSERT(tableOffset > 0);
+ MOZ_ASSERT(numRegions > 0);
+
+ // Writer is done, copy it to sized buffer.
+ uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
+ if (!data) {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ memcpy(data, writer.buffer(), writer.length());
+ nativeToBytecodeMap_ = data;
+ nativeToBytecodeMapSize_ = writer.length();
+ nativeToBytecodeTableOffset_ = tableOffset;
+ nativeToBytecodeNumRegions_ = numRegions;
+
+ verifyCompactNativeToBytecodeMap(code);
+
+ JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]",
+ data, data + nativeToBytecodeMapSize_);
+
+ return true;
+}
+
+void
+CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode* code)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ > 0);
+
+ // The pointer to the table must be 4-byte aligned
+ const uint8_t* tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
+ MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
+
+ // Verify that numRegions was encoded correctly.
+ const JitcodeIonTable* ionTable = reinterpret_cast<const JitcodeIonTable*>(tablePtr);
+ MOZ_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
+
+ // Region offset for first region should be at the start of the payload region.
+ // Since the offsets are backward from the start of the table, the first entry
+ // backoffset should be equal to the forward table offset from the start of the
+ // allocated data.
+ MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
+
+ // Verify each region.
+ for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
+ // Back-offset must point into the payload region preceding the table, not before it.
+ MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
+
+ // Back-offset must point to a later area in the payload region than previous
+ // back-offset. This means that back-offsets decrease monotonically.
+ MOZ_ASSERT_IF(i > 0, ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
+
+ JitcodeRegionEntry entry = ionTable->regionEntry(i);
+
+ // Ensure native code offset for region falls within jitcode.
+ MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
+
+ // Read out script/pc stack and verify.
+ JitcodeRegionEntry::ScriptPcIterator scriptPcIter = entry.scriptPcIterator();
+ while (scriptPcIter.hasMore()) {
+ uint32_t scriptIdx = 0, pcOffset = 0;
+ scriptPcIter.readNext(&scriptIdx, &pcOffset);
+
+ // Ensure scriptIdx refers to a valid script in the list.
+ MOZ_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
+ JSScript* script = nativeToBytecodeScriptList_[scriptIdx];
+
+ // Ensure pcOffset falls within the script.
+ MOZ_ASSERT(pcOffset < script->length());
+ }
+
+ // Obtain the original nativeOffset and pcOffset and script.
+ uint32_t curNativeOffset = entry.nativeOffset();
+ JSScript* script = nullptr;
+ uint32_t curPcOffset = 0;
+ {
+ uint32_t scriptIdx = 0;
+ scriptPcIter.reset();
+ scriptPcIter.readNext(&scriptIdx, &curPcOffset);
+ script = nativeToBytecodeScriptList_[scriptIdx];
+ }
+
+ // Read out nativeDeltas and pcDeltas and verify.
+ JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
+ while (deltaIter.hasMore()) {
+ uint32_t nativeDelta = 0;
+ int32_t pcDelta = 0;
+ deltaIter.readNext(&nativeDelta, &pcDelta);
+
+ curNativeOffset += nativeDelta;
+ curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
+
+ // Ensure that nativeOffset still falls within jitcode after delta.
+ MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
+
+ // Ensure that pcOffset still falls within bytecode after delta.
+ MOZ_ASSERT(curPcOffset < script->length());
+ }
+ }
+#endif // DEBUG
+}
+
+bool
+CodeGeneratorShared::generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
+ IonTrackedTypeVector* allTypes)
+{
+ MOZ_ASSERT(trackedOptimizationsMap_ == nullptr);
+ MOZ_ASSERT(trackedOptimizationsMapSize_ == 0);
+ MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ == 0);
+ MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ == 0);
+ MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ == 0);
+
+ if (trackedOptimizations_.empty())
+ return true;
+
+ UniqueTrackedOptimizations unique(cx);
+ if (!unique.init())
+ return false;
+
+ // Iterate through all entries to deduplicate their optimization attempts.
+ for (size_t i = 0; i < trackedOptimizations_.length(); i++) {
+ NativeToTrackedOptimizations& entry = trackedOptimizations_[i];
+ if (!unique.add(entry.optimizations))
+ return false;
+ }
+
+ // Sort the unique optimization attempts by frequency to stabilize the
+ // attempts' indices in the compact table we will write later.
+ if (!unique.sortByFrequency(cx))
+ return false;
+
+ // Write out the ranges and the table.
+ CompactBufferWriter writer;
+ uint32_t numRegions;
+ uint32_t regionTableOffset;
+ uint32_t typesTableOffset;
+ uint32_t attemptsTableOffset;
+ if (!WriteIonTrackedOptimizationsTable(cx, writer,
+ trackedOptimizations_.begin(),
+ trackedOptimizations_.end(),
+ unique, &numRegions,
+ &regionTableOffset, &typesTableOffset,
+ &attemptsTableOffset, allTypes))
+ {
+ return false;
+ }
+
+ MOZ_ASSERT(regionTableOffset > 0);
+ MOZ_ASSERT(typesTableOffset > 0);
+ MOZ_ASSERT(attemptsTableOffset > 0);
+ MOZ_ASSERT(typesTableOffset > regionTableOffset);
+ MOZ_ASSERT(attemptsTableOffset > typesTableOffset);
+
+ // Copy over the table out of the writer's buffer.
+ uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
+ if (!data)
+ return false;
+
+ memcpy(data, writer.buffer(), writer.length());
+ trackedOptimizationsMap_ = data;
+ trackedOptimizationsMapSize_ = writer.length();
+ trackedOptimizationsRegionTableOffset_ = regionTableOffset;
+ trackedOptimizationsTypesTableOffset_ = typesTableOffset;
+ trackedOptimizationsAttemptsTableOffset_ = attemptsTableOffset;
+
+ verifyCompactTrackedOptimizationsMap(code, numRegions, unique, allTypes);
+
+ JitSpew(JitSpew_OptimizationTracking,
+ "== Compact Native To Optimizations Map [%p-%p] size %u",
+ data, data + trackedOptimizationsMapSize_, trackedOptimizationsMapSize_);
+ JitSpew(JitSpew_OptimizationTracking,
+ " with type list of length %" PRIuSIZE ", size %" PRIuSIZE,
+ allTypes->length(), allTypes->length() * sizeof(IonTrackedTypeWithAddendum));
+
+ return true;
+}
+
+#ifdef DEBUG
+class ReadTempAttemptsVectorOp : public JS::ForEachTrackedOptimizationAttemptOp
+{
+ TempOptimizationAttemptsVector* attempts_;
+ bool oom_;
+
+ public:
+ explicit ReadTempAttemptsVectorOp(TempOptimizationAttemptsVector* attempts)
+ : attempts_(attempts), oom_(false)
+ { }
+
+ bool oom() {
+ return oom_;
+ }
+
+ void operator()(JS::TrackedStrategy strategy, JS::TrackedOutcome outcome) override {
+ if (!attempts_->append(OptimizationAttempt(strategy, outcome)))
+ oom_ = true;
+ }
+};
+
+struct ReadTempTypeInfoVectorOp : public IonTrackedOptimizationsTypeInfo::ForEachOp
+{
+ TempAllocator& alloc_;
+ TempOptimizationTypeInfoVector* types_;
+ TempTypeList accTypes_;
+ bool oom_;
+
+ public:
+ ReadTempTypeInfoVectorOp(TempAllocator& alloc, TempOptimizationTypeInfoVector* types)
+ : alloc_(alloc),
+ types_(types),
+ accTypes_(alloc),
+ oom_(false)
+ { }
+
+ bool oom() {
+ return oom_;
+ }
+
+ void readType(const IonTrackedTypeWithAddendum& tracked) override {
+ if (!accTypes_.append(tracked.type))
+ oom_ = true;
+ }
+
+ void operator()(JS::TrackedTypeSite site, MIRType mirType) override {
+ OptimizationTypeInfo ty(alloc_, site, mirType);
+ for (uint32_t i = 0; i < accTypes_.length(); i++) {
+ if (!ty.trackType(accTypes_[i]))
+ oom_ = true;
+ }
+ if (!types_->append(mozilla::Move(ty)))
+ oom_ = true;
+ accTypes_.clear();
+ }
+};
+#endif // DEBUG
+
+void
+CodeGeneratorShared::verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
+ const UniqueTrackedOptimizations& unique,
+ const IonTrackedTypeVector* allTypes)
+{
+#ifdef DEBUG
+ MOZ_ASSERT(trackedOptimizationsMap_ != nullptr);
+ MOZ_ASSERT(trackedOptimizationsMapSize_ > 0);
+ MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ > 0);
+ MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ > 0);
+ MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ > 0);
+
+ // Table pointers must all be 4-byte aligned.
+ const uint8_t* regionTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsRegionTableOffset_;
+ const uint8_t* typesTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsTypesTableOffset_;
+ const uint8_t* attemptsTableAddr = trackedOptimizationsMap_ +
+ trackedOptimizationsAttemptsTableOffset_;
+ MOZ_ASSERT(uintptr_t(regionTableAddr) % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(uintptr_t(typesTableAddr) % sizeof(uint32_t) == 0);
+ MOZ_ASSERT(uintptr_t(attemptsTableAddr) % sizeof(uint32_t) == 0);
+
+ // Assert that the number of entries matches up for the tables.
+ const IonTrackedOptimizationsRegionTable* regionTable =
+ (const IonTrackedOptimizationsRegionTable*) regionTableAddr;
+ MOZ_ASSERT(regionTable->numEntries() == numRegions);
+ const IonTrackedOptimizationsTypesTable* typesTable =
+ (const IonTrackedOptimizationsTypesTable*) typesTableAddr;
+ MOZ_ASSERT(typesTable->numEntries() == unique.count());
+ const IonTrackedOptimizationsAttemptsTable* attemptsTable =
+ (const IonTrackedOptimizationsAttemptsTable*) attemptsTableAddr;
+ MOZ_ASSERT(attemptsTable->numEntries() == unique.count());
+
+ // Verify each region.
+ uint32_t trackedIdx = 0;
+ for (uint32_t regionIdx = 0; regionIdx < regionTable->numEntries(); regionIdx++) {
+ // Check reverse offsets are within bounds.
+ MOZ_ASSERT(regionTable->entryOffset(regionIdx) <= trackedOptimizationsRegionTableOffset_);
+ MOZ_ASSERT_IF(regionIdx > 0, regionTable->entryOffset(regionIdx) <
+ regionTable->entryOffset(regionIdx - 1));
+
+ IonTrackedOptimizationsRegion region = regionTable->entry(regionIdx);
+
+ // Check the region range is covered by jitcode.
+ MOZ_ASSERT(region.startOffset() <= code->instructionsSize());
+ MOZ_ASSERT(region.endOffset() <= code->instructionsSize());
+
+ IonTrackedOptimizationsRegion::RangeIterator iter = region.ranges();
+ while (iter.more()) {
+ // Assert that the offsets are correctly decoded from the delta.
+ uint32_t startOffset, endOffset;
+ uint8_t index;
+ iter.readNext(&startOffset, &endOffset, &index);
+ NativeToTrackedOptimizations& entry = trackedOptimizations_[trackedIdx++];
+ MOZ_ASSERT(startOffset == entry.startOffset.offset());
+ MOZ_ASSERT(endOffset == entry.endOffset.offset());
+ MOZ_ASSERT(index == unique.indexOf(entry.optimizations));
+
+ // Assert that the type info and attempts vectors are correctly
+ // decoded. This is disabled for now if the types table might
+ // contain nursery pointers, in which case the types might not
+ // match, see bug 1175761.
+ if (!code->runtimeFromMainThread()->gc.storeBuffer.cancelIonCompilations()) {
+ IonTrackedOptimizationsTypeInfo typeInfo = typesTable->entry(index);
+ TempOptimizationTypeInfoVector tvec(alloc());
+ ReadTempTypeInfoVectorOp top(alloc(), &tvec);
+ typeInfo.forEach(top, allTypes);
+ MOZ_ASSERT_IF(!top.oom(), entry.optimizations->matchTypes(tvec));
+ }
+
+ IonTrackedOptimizationsAttempts attempts = attemptsTable->entry(index);
+ TempOptimizationAttemptsVector avec(alloc());
+ ReadTempAttemptsVectorOp aop(&avec);
+ attempts.forEach(aop);
+ MOZ_ASSERT_IF(!aop.oom(), entry.optimizations->matchAttempts(avec));
+ }
+ }
+#endif
+}
+
+void
+CodeGeneratorShared::markSafepoint(LInstruction* ins)
+{
+ markSafepointAt(masm.currentOffset(), ins);
+}
+
+void
+CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins)
+{
+ MOZ_ASSERT_IF(!safepointIndices_.empty() && !masm.oom(),
+ offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
+ masm.propagateOOM(safepointIndices_.append(SafepointIndex(offset, ins->safepoint())));
+}
+
+void
+CodeGeneratorShared::ensureOsiSpace()
+{
+ // For a refresher, an invalidation point is of the form:
+ // 1: call <target>
+ // 2: ...
+ // 3: <osipoint>
+ //
+ // The four bytes *before* instruction 2 are overwritten with an offset.
+ // Callers must ensure that the instruction itself has enough bytes to
+ // support this.
+ //
+    // The bytes *at* instruction 3 are overwritten with an invalidation
+    // jump. These bytes may be in a completely different IR sequence, but
+ // represent the join point of the call out of the function.
+ //
+ // At points where we want to ensure that invalidation won't corrupt an
+ // important instruction, we make sure to pad with nops.
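+    // On x86, for instance, PatchWrite_NearCallSize() is the five-byte near
+    // call, so if only three bytes of code have been emitted since the
+    // previous OSI point, two one-byte nops are inserted below.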
+ if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) {
+ int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
+ paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
+ for (int32_t i = 0; i < paddingSize; ++i)
+ masm.nop();
+ }
+ MOZ_ASSERT_IF(!masm.oom(),
+ masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize());
+ lastOsiPointOffset_ = masm.currentOffset();
+}
+
+uint32_t
+CodeGeneratorShared::markOsiPoint(LOsiPoint* ins)
+{
+ encode(ins->snapshot());
+ ensureOsiSpace();
+
+ uint32_t offset = masm.currentOffset();
+ SnapshotOffset so = ins->snapshot()->snapshotOffset();
+ masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
+
+ return offset;
+}
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+template <class Op>
+static void
+HandleRegisterDump(Op op, MacroAssembler& masm, LiveRegisterSet liveRegs, Register activation,
+ Register scratch)
+{
+ const size_t baseOffset = JitActivation::offsetOfRegs();
+
+ // Handle live GPRs.
+ for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
+ Register reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+
+ if (reg == activation) {
+ // To use the original value of the activation register (that's
+ // now on top of the stack), we need the scratch register.
+ masm.push(scratch);
+ masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
+ op(scratch, dump);
+ masm.pop(scratch);
+ } else {
+ op(reg, dump);
+ }
+ }
+
+ // Handle live FPRs.
+ for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
+ op(reg, dump);
+ }
+}
+
+class StoreOp
+{
+ MacroAssembler& masm;
+
+ public:
+ explicit StoreOp(MacroAssembler& masm)
+ : masm(masm)
+ {}
+
+ void operator()(Register reg, Address dump) {
+ masm.storePtr(reg, dump);
+ }
+ void operator()(FloatRegister reg, Address dump) {
+ if (reg.isDouble())
+ masm.storeDouble(reg, dump);
+ else if (reg.isSingle())
+ masm.storeFloat32(reg, dump);
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ else if (reg.isSimd128())
+ masm.storeUnalignedSimd128Float(reg, dump);
+#endif
+ else
+ MOZ_CRASH("Unexpected register type.");
+ }
+};
+
+static void
+StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs)
+{
+ // Store a copy of all live registers before performing the call.
+ // When we reach the OsiPoint, we can use this to check nothing
+ // modified them in the meantime.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.add32(Imm32(1), checkRegs);
+
+ StoreOp op(masm);
+ HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.pop(scratch);
+}
+
+class VerifyOp
+{
+ MacroAssembler& masm;
+ Label* failure_;
+
+ public:
+ VerifyOp(MacroAssembler& masm, Label* failure)
+ : masm(masm), failure_(failure)
+ {}
+
+ void operator()(Register reg, Address dump) {
+ masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
+ }
+ void operator()(FloatRegister reg, Address dump) {
+ FloatRegister scratch;
+ if (reg.isDouble()) {
+ scratch = ScratchDoubleReg;
+ masm.loadDouble(dump, scratch);
+ masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ } else if (reg.isSingle()) {
+ scratch = ScratchFloat32Reg;
+ masm.loadFloat32(dump, scratch);
+ masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
+ }
+
+ // :TODO: (Bug 1133745) Add support to verify SIMD registers.
+ }
+};
+
+void
+CodeGeneratorShared::verifyOsiPointRegs(LSafepoint* safepoint)
+{
+ // Ensure the live registers stored by callVM did not change between
+ // the call and this OsiPoint. Try-catch relies on this invariant.
+
+ // Load pointer to the JitActivation in a scratch register.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+
+ // If we should not check registers (because the instruction did not call
+ // into the VM, or a GC happened), we're done.
+ Label failure, done;
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
+
+    // Having more than one VM function call made in one visit function at
+    // runtime is a security-critical error, because if we conservatively
+    // assume that one of the function calls can re-enter Ion, then the
+    // invalidation process could add a call at a random location by patching
+    // the code before the return address.
+ masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
+
+ // Set checkRegs to 0, so that we don't try to verify registers after we
+ // return from this script to the caller.
+ masm.store32(Imm32(0), checkRegs);
+
+    // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
+    // temps after calling into the VM. This is fine because no other
+    // instructions (including this OsiPoint) will depend on them. The
+    // backtracking allocator can also use the same register for an input and
+    // an output. These are marked as clobbered and shouldn't get checked.
+ LiveRegisterSet liveRegs;
+ liveRegs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
+ RegisterSet::Not(safepoint->clobberedRegs().set()));
+
+ VerifyOp op(masm, &failure);
+ HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
+
+ masm.jump(&done);
+
+ // Do not profile the callWithABI that occurs below. This is to avoid a
+ // rare corner case that occurs when profiling interacts with itself:
+ //
+ // When slow profiling assertions are turned on, FunctionBoundary ops
+ // (which update the profiler pseudo-stack) may emit a callVM, which
+ // forces them to have an osi point associated with them. The
+ // FunctionBoundary for inline function entry is added to the caller's
+ // graph with a PC from the caller's code, but during codegen it modifies
+ // SPS instrumentation to add the callee as the current top-most script.
+ // When codegen gets to the OSIPoint, and the callWithABI below is
+ // emitted, the codegen thinks that the current frame is the callee, but
+ // the PC it's using from the OSIPoint refers to the caller. This causes
+ // the profiler instrumentation of the callWithABI below to ASSERT, since
+ // the script and pc are mismatched. To avoid this, we simply omit
+ // instrumentation for these callWithABIs.
+
+ // Any live register captured by a safepoint (other than temp registers)
+ // must remain unchanged between the call and the OsiPoint instruction.
+ masm.bind(&failure);
+ masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
+
+ masm.bind(&done);
+ masm.pop(scratch);
+}
+
+bool
+CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint* safepoint)
+{
+ if (!checkOsiPointRegisters)
+ return false;
+
+ if (safepoint->liveRegs().emptyGeneral() && safepoint->liveRegs().emptyFloat())
+ return false; // No registers to check.
+
+ return true;
+}
+
+void
+CodeGeneratorShared::resetOsiPointRegs(LSafepoint* safepoint)
+{
+ if (!shouldVerifyOsiPointRegs(safepoint))
+ return;
+
+ // Set checkRegs to 0. If we perform a VM call, the instruction
+ // will set it to 1.
+ AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
+ Register scratch = allRegs.takeAny();
+ masm.push(scratch);
+ masm.loadJitActivation(scratch);
+ Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
+ masm.store32(Imm32(0), checkRegs);
+ masm.pop(scratch);
+}
+#endif
+
+// Before doing any call into C++ code, you should ensure that volatile
+// registers are evicted by the register allocator.
+void
+CodeGeneratorShared::callVM(const VMFunction& fun, LInstruction* ins, const Register* dynStack)
+{
+ // If we're calling a function with an out parameter type of double, make
+ // sure we have an FPU.
+ MOZ_ASSERT_IF(fun.outParam == Type_Double, GetJitContext()->runtime->jitSupportsFloatingPoint());
+
+#ifdef DEBUG
+ if (ins->mirRaw()) {
+ MOZ_ASSERT(ins->mirRaw()->isInstruction());
+ MInstruction* mir = ins->mirRaw()->toInstruction();
+ MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
+ }
+#endif
+
+ // Stack is:
+ // ... frame ...
+ // [args]
+#ifdef DEBUG
+ MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
+ pushedArgs_ = 0;
+#endif
+
+ // Get the wrapper of the VM function.
+ JitCode* wrapper = gen->jitRuntime()->getVMWrapper(fun);
+ if (!wrapper) {
+ masm.setOOM();
+ return;
+ }
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ if (shouldVerifyOsiPointRegs(ins->safepoint()))
+ StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
+#endif
+
+    // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
+    // register, then its value is added to the value of framePushed() to
+    // fill the frame descriptor.
+ if (dynStack) {
+ masm.addPtr(Imm32(masm.framePushed()), *dynStack);
+ masm.makeFrameDescriptor(*dynStack, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.Push(*dynStack); // descriptor
+ } else {
+ masm.pushStaticFrameDescriptor(JitFrame_IonJS, ExitFrameLayout::Size());
+ }
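+
+    // Stack is:
+    //    ... frame ...
+    //    [args]
+    //    descriptor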
+
+    // Call the wrapper function. The wrapper is in charge of unwinding the
+    // stack when returning from the call. Failures are handled with exceptions
+    // based on the return value of the C function; to guard the outcome of the
+    // returned value, use another LIR instruction.
+ uint32_t callOffset = masm.callJit(wrapper);
+ markSafepointAt(callOffset, ins);
+
+    // Remove the rest of the frame left on the stack. We remove the return
+    // address, which is implicitly popped when returning.
+ int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
+ // Stack is:
+ // ... frame ...
+}
+
+class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ FloatRegister src_;
+ Register dest_;
+ bool widenFloatToDouble_;
+
+ public:
+ OutOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble = false)
+ : src_(src), dest_(dest), widenFloatToDouble_(widenFloatToDouble)
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineTruncateSlow(this);
+ }
+ FloatRegister src() const {
+ return src_;
+ }
+ Register dest() const {
+ return dest_;
+ }
+ bool widenFloatToDouble() const {
+ return widenFloatToDouble_;
+ }
+
+};
+
+OutOfLineCode*
+CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest);
+ addOutOfLineCode(ool, mir);
+ return ool;
+}
+
+void
+CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineCode* ool = oolTruncateDouble(src, dest, mir);
+
+ masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir)
+{
+ OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, true);
+ addOutOfLineCode(ool, mir);
+
+ masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void
+CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
+{
+ FloatRegister src = ool->src();
+ Register dest = ool->dest();
+
+ saveVolatile(dest);
+ masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingWasm());
+ restoreVolatile(dest);
+
+ masm.jump(ool->rejoin());
+}
+
+bool
+CodeGeneratorShared::omitOverRecursedCheck() const
+{
+ // If the current function makes no calls (which means it isn't recursive)
+ // and it uses only a small amount of stack space, it doesn't need a
+ // stack overflow check. Note that the actual number here is somewhat
+ // arbitrary, and codegen actually uses small bounded amounts of
+ // additional stack space in some cases too.
+ return frameSize() < 64 && !gen->performsCall();
+}
+
+void
+CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
+{
+ MWasmCall* mir = ins->mir();
+
+ if (mir->spIncrement())
+ masm.freeStack(mir->spIncrement());
+
+ MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment == 0);
+ static_assert(WasmStackAlignment >= ABIStackAlignment &&
+ WasmStackAlignment % ABIStackAlignment == 0,
+ "The wasm stack alignment should subsume the ABI-required alignment");
+
+#ifdef DEBUG
+ Label ok;
+ masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+
+ // Save the caller's TLS register in a reserved stack slot (below the
+ // call's stack arguments) for retrieval after the call.
+ if (mir->saveTls())
+ masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
+
+ const wasm::CallSiteDesc& desc = mir->desc();
+ const wasm::CalleeDesc& callee = mir->callee();
+ switch (callee.which()) {
+ case wasm::CalleeDesc::Func:
+ masm.call(desc, callee.funcIndex());
+ break;
+ case wasm::CalleeDesc::Import:
+ masm.wasmCallImport(desc, callee);
+ break;
+ case wasm::CalleeDesc::WasmTable:
+ case wasm::CalleeDesc::AsmJSTable:
+ masm.wasmCallIndirect(desc, callee);
+ break;
+ case wasm::CalleeDesc::Builtin:
+ masm.call(callee.builtin());
+ break;
+ case wasm::CalleeDesc::BuiltinInstanceMethod:
+ masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
+ break;
+ }
+
+ // After return, restore the caller's TLS and pinned registers.
+ if (mir->saveTls()) {
+ masm.loadPtr(Address(masm.getStackPointer(), mir->tlsStackOffset()), WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+ }
+
+ if (mir->spIncrement())
+ masm.reserveStack(mir->spIncrement());
+}
+
+void
+CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
+{
+ if (index->isConstant()) {
+ Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+ } else {
+ BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+ }
+}
+
+void
+CodeGeneratorShared::emitPreBarrier(Address address)
+{
+ masm.patchableCallPreBarrier(address, MIRType::Value);
+}
+
+Label*
+CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
+{
+ // If this is a loop backedge to a loop header with an implicit interrupt
+ // check, use a patchable jump. Skip this search if compiling without a
+ // script for wasm, as there will be no interrupt check instruction.
+ // Due to critical edge unsplitting there may no longer be unique loop
+ // backedges, so just look for any edge going to an earlier block in RPO.
+ if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
+ for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
+ if (iter->isMoveGroup()) {
+ // Continue searching for an interrupt check.
+ } else {
+ // The interrupt check should be the first instruction in the
+ // loop header other than move groups.
+ MOZ_ASSERT(iter->isInterruptCheck());
+ if (iter->toInterruptCheck()->implicit())
+ return iter->toInterruptCheck()->oolEntry();
+ return nullptr;
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+void
+CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ // No jump necessary if we can fall through to the next block.
+ if (isNextBlock(mir->lir()))
+ return;
+
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge = masm.backedgeJump(&rejoin, mir->lir()->label());
+ masm.bind(&rejoin);
+
+ masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
+ } else {
+ masm.jump(mir->lir()->label());
+ }
+}
+
+Label*
+CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block)
+{
+ // Skip past trivial blocks.
+ block = skipTrivialBlocks(block);
+
+ if (!labelForBackedgeWithImplicitCheck(block))
+ return block->lir()->label();
+
+ // We need to use a patchable jump for this backedge, but want to treat
+ // this as a normal label target to simplify codegen. Efficiency isn't so
+ // important here as these tests are extremely unlikely to be used in loop
+ // backedges, so emit inline code for the patchable jump. Heap allocating
+ // the label allows it to be used by out of line blocks.
+ Label* res = alloc().lifoAlloc()->newInfallible<Label>();
+ Label after;
+ masm.jump(&after);
+ masm.bind(res);
+ jumpToBlock(block);
+ masm.bind(&after);
+ return res;
+}
+
+// This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+void
+CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond, mir->lir()->label());
+ masm.bind(&rejoin);
+
+ masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
+ } else {
+ masm.j(cond, mir->lir()->label());
+ }
+}
+#endif
+
+MOZ_MUST_USE bool
+CodeGeneratorShared::addCacheLocations(const CacheLocationList& locs, size_t* numLocs,
+ size_t* curIndex)
+{
+ size_t firstIndex = runtimeData_.length();
+ size_t numLocations = 0;
+ for (CacheLocationList::iterator iter = locs.begin(); iter != locs.end(); iter++) {
+ // allocateData() ensures that sizeof(CacheLocation) is word-aligned.
+ // If this changes, we will need to pad to ensure alignment.
+ if (!allocateData(sizeof(CacheLocation), curIndex))
+ return false;
+ new (&runtimeData_[*curIndex]) CacheLocation(iter->pc, iter->script);
+ numLocations++;
+ }
+ MOZ_ASSERT(numLocations != 0);
+ *numLocs = numLocations;
+ *curIndex = firstIndex;
+ return true;
+}
+
+ReciprocalMulConstants
+CodeGeneratorShared::computeDivisionConstants(uint32_t d, int maxLog) {
+ MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
+ // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
+ MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
+
+ // Speeding up division by non power-of-2 constants is possible by
+ // calculating, during compilation, a value M such that high-order
+ // bits of M*n correspond to the result of the division of n by d.
+ // No value of M can serve this purpose for arbitrarily big values
+ // of n but, for optimizing integer division, we're just concerned
+ // with values of n whose absolute value is bounded (by fitting in
+ // an integer type, say). With this in mind, we'll find a constant
+ // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
+ // then be 31 for signed division or 32 for unsigned division.
+ //
+ // The original presentation of this technique appears in Hacker's
+    // Delight, a book by Henry S. Warren, Jr. A proof of correctness
+ // for our version follows; we'll denote maxLog by L in the proof,
+ // for conciseness.
+ //
+ // Formally, for |d| < 2^L, we'll compute two magic values M and s
+ // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
+ // (M * n) >> (32 + s) = floor(n/d) if 0 <= n < 2^L
+ // (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
+ //
+ // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
+ // M - 2^p/d <= 2^(p-L)/d. (1)
+ // (Observe that p = CeilLog32(d) + L satisfies this, as the right
+ // side of (1) is at least one in this case). Then,
+ //
+ // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
+ // Proof: Indeed, M is monotone in p and, for p equal to the above
+ // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
+ // 2^p / d < 2^p/(d - 1) * (d - 1)/d
+ // <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
+ // The claim follows by applying the ceiling function.
+ //
+ // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
+ // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
+ // Mn/2^p - 1 < x <= Mn/2^p. (2)
+ // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
+ // n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
+ // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
+ // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
+ //
+ // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
+ // Proof: The proof is similar. Equation (2) holds as above. Using
+ // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
+ // n/d + n/(2^L d) - 1 < x < n/d.
+ // Using n >= -2^L and summing 1,
+ // n/d - 1/d < x + 1 < n/d + 1.
+ // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
+ // In other words, x + 1 = ceil(n/d).
+ //
+ // Condition (1) isn't necessary for the existence of M and s with
+ // the properties above. Hacker's Delight provides a slightly less
+ // restrictive condition when d >= 196611, at the cost of a 3-page
+ // proof of correctness, for the case L = 31.
+ //
+ // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
+ // 2^(p-L) >= d - (2^p)%d.
+ // In order to avoid overflow in the (2^p) % d calculation, we can
+ // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
+ // without overflow as UINT64_MAX >> (64-p).
+
+ // We now compute the least p >= 32 with the property above...
+ int32_t p = 32;
+ while ((uint64_t(1) << (p-maxLog)) + (UINT64_MAX >> (64-p)) % d + 1 < d)
+ p++;
+
+ // ...and the corresponding M. For either the signed (L=31) or the
+ // unsigned (L=32) case, this value can be too large (cf. item a).
+ // Codegen can still multiply by M by multiplying by (M - 2^L) and
+ // adjusting the value afterwards, if this is the case.
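+    // For instance, with d = 7 and maxLog = 32 (unsigned division), the loop
+    // above settles on p = 35, so M = ceil(2^35 / 7) = 0x124924925 and the
+    // shift amount is 3. M does not fit in 32 bits, so codegen multiplies by
+    // M - 2^32 = 0x24924925 and applies the adjustment described above.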
+ ReciprocalMulConstants rmc;
+ rmc.multiplier = (UINT64_MAX >> (64-p))/d + 1;
+ rmc.shiftAmount = p - 32;
+
+ return rmc;
+}
+
+#ifdef JS_TRACE_LOGGING
+
+void
+CodeGeneratorShared::emitTracelogScript(bool isStart)
+{
+ if (!TraceLogTextIdEnabled(TraceLogger_Scripts))
+ return;
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+ Register script = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ CodeOffset patchLogger = masm.movWithPatch(ImmPtr(nullptr), logger);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLogger));
+
+ masm.branchTest32(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(script);
+
+ CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
+ masm.propagateOOM(patchableTLScripts_.append(patchScript));
+
+ if (isStart)
+ masm.tracelogStartId(logger, script);
+ else
+ masm.tracelogStopId(logger, script);
+
+ masm.Pop(script);
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void
+CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
+{
+ if (!TraceLogTextIdEnabled(textId))
+ return;
+
+ Label done;
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ CodeOffset patchLocation = masm.movWithPatch(ImmPtr(nullptr), logger);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLocation));
+
+ masm.branchTest32(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ if (isStart)
+ masm.tracelogStartId(logger, textId);
+ else
+ masm.tracelogStopId(logger, textId);
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void
+CodeGeneratorShared::emitTracelogTree(bool isStart, const char* text,
+ TraceLoggerTextId enabledTextId)
+{
+ if (!TraceLogTextIdEnabled(enabledTextId))
+ return;
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+ Register eventReg = regs.takeAnyGeneral();
+
+ masm.Push(loggerReg);
+
+ CodeOffset patchLocation = masm.movWithPatch(ImmPtr(nullptr), loggerReg);
+ masm.propagateOOM(patchableTraceLoggers_.append(patchLocation));
+
+ masm.branchTest32(Assembler::Zero, loggerReg, loggerReg, &done);
+
+ Address enabledAddress(loggerReg, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(eventReg);
+
+ PatchableTLEvent patchEvent(masm.movWithPatch(ImmWord(0), eventReg), text);
+ masm.propagateOOM(patchableTLEvents_.append(Move(patchEvent)));
+
+ if (isStart)
+ masm.tracelogStartId(loggerReg, eventReg);
+ else
+ masm.tracelogStopId(loggerReg, eventReg);
+
+ masm.Pop(eventReg);
+
+ masm.bind(&done);
+
+ masm.Pop(loggerReg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h
new file mode 100644
index 000000000..c96808c2d
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -0,0 +1,850 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_h
+#define jit_shared_CodeGenerator_shared_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Move.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jit/JitFrames.h"
+#include "jit/LIR.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/OptimizationTracking.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+#include "jit/VMFunctions.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineCode;
+class CodeGenerator;
+class MacroAssembler;
+class IonCache;
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM;
+
+class OutOfLineTruncateSlow;
+class OutOfLineWasmTruncateCheck;
+
+struct PatchableBackedgeInfo
+{
+ CodeOffsetJump backedge;
+ Label* loopHeader;
+ Label* interruptCheck;
+
+ PatchableBackedgeInfo(CodeOffsetJump backedge, Label* loopHeader, Label* interruptCheck)
+ : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
+ {}
+};
+
+struct ReciprocalMulConstants {
+ int64_t multiplier;
+ int32_t shiftAmount;
+};
+
+// This should be nested in CodeGeneratorShared, but it is used in the
+// optimization tracking implementation and nested classes cannot be
+// forward-declared.
+struct NativeToTrackedOptimizations
+{
+ // [startOffset, endOffset]
+ CodeOffset startOffset;
+ CodeOffset endOffset;
+ const TrackedOptimizations* optimizations;
+};
+
+class CodeGeneratorShared : public LElementVisitor
+{
+ js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;
+
+ MacroAssembler& ensureMasm(MacroAssembler* masm);
+ mozilla::Maybe<MacroAssembler> maybeMasm_;
+
+ public:
+ MacroAssembler& masm;
+
+ protected:
+ MIRGenerator* gen;
+ LIRGraph& graph;
+ LBlock* current;
+ SnapshotWriter snapshots_;
+ RecoverWriter recovers_;
+ JitCode* deoptTable_;
+#ifdef DEBUG
+ uint32_t pushedArgs_;
+#endif
+ uint32_t lastOsiPointOffset_;
+ SafepointWriter safepoints_;
+ Label invalidate_;
+ CodeOffset invalidateEpilogueData_;
+
+ // Label for the common return path.
+ NonAssertingLabel returnLabel_;
+
+ FallbackICStubSpace stubSpace_;
+
+ js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
+ js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
+
+ // Mapping from bailout table ID to an offset in the snapshot buffer.
+ js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;
+
+ // Allocated data space needed at runtime.
+ js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
+
+ // Vector of information about generated polymorphic inline caches.
+ js::Vector<uint32_t, 0, SystemAllocPolicy> cacheList_;
+
+ // Patchable backedges generated for loops.
+ Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;
+
+#ifdef JS_TRACE_LOGGING
+ struct PatchableTLEvent {
+ CodeOffset offset;
+ const char* event;
+ PatchableTLEvent(CodeOffset offset, const char* event)
+ : offset(offset), event(event)
+ {}
+ };
+ js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTraceLoggers_;
+ js::Vector<PatchableTLEvent, 0, SystemAllocPolicy> patchableTLEvents_;
+ js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTLScripts_;
+#endif
+
+ public:
+ struct NativeToBytecode {
+ CodeOffset nativeOffset;
+ InlineScriptTree* tree;
+ jsbytecode* pc;
+ };
+
+ protected:
+ js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
+ uint8_t* nativeToBytecodeMap_;
+ uint32_t nativeToBytecodeMapSize_;
+ uint32_t nativeToBytecodeTableOffset_;
+ uint32_t nativeToBytecodeNumRegions_;
+
+ JSScript** nativeToBytecodeScriptList_;
+ uint32_t nativeToBytecodeScriptListLength_;
+
+ bool isProfilerInstrumentationEnabled() {
+ return gen->isProfilerInstrumentationEnabled();
+ }
+
+ js::Vector<NativeToTrackedOptimizations, 0, SystemAllocPolicy> trackedOptimizations_;
+ uint8_t* trackedOptimizationsMap_;
+ uint32_t trackedOptimizationsMapSize_;
+ uint32_t trackedOptimizationsRegionTableOffset_;
+ uint32_t trackedOptimizationsTypesTableOffset_;
+ uint32_t trackedOptimizationsAttemptsTableOffset_;
+
+ bool isOptimizationTrackingEnabled() {
+ return gen->isOptimizationTrackingEnabled();
+ }
+
+ protected:
+ // The offset of the first instruction of the OSR entry block from the
+ // beginning of the code buffer.
+ size_t osrEntryOffset_;
+
+ TempAllocator& alloc() const {
+ return graph.mir().alloc();
+ }
+
+ inline void setOsrEntryOffset(size_t offset) {
+ MOZ_ASSERT(osrEntryOffset_ == 0);
+ osrEntryOffset_ = offset;
+ }
+ inline size_t getOsrEntryOffset() const {
+ return osrEntryOffset_;
+ }
+
+ // The offset of the first instruction of the body.
+ // This skips the arguments type checks.
+ size_t skipArgCheckEntryOffset_;
+
+ inline void setSkipArgCheckEntryOffset(size_t offset) {
+ MOZ_ASSERT(skipArgCheckEntryOffset_ == 0);
+ skipArgCheckEntryOffset_ = offset;
+ }
+ inline size_t getSkipArgCheckEntryOffset() const {
+ return skipArgCheckEntryOffset_;
+ }
+
+ typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // See JitOptions.checkOsiPointRegisters. We set this here to avoid
+ // races when enableOsiPointRegisterChecks is called while we're generating
+ // code off-thread.
+ bool checkOsiPointRegisters;
+#endif
+
+ // The initial size of the frame in bytes. These are bytes beyond the
+ // constant header present for every Ion frame, used for pre-determined
+ // spills.
+ int32_t frameDepth_;
+
+ // In some cases, we force stack alignment to platform boundaries, see
+ // also CodeGeneratorShared constructor. This value records the adjustment
+ // we've done.
+ int32_t frameInitialAdjustment_;
+
+ // Frame class this frame's size falls into (see IonFrame.h).
+ FrameSizeClass frameClass_;
+
+ // For arguments to the current function.
+ inline int32_t ArgToStackOffset(int32_t slot) const;
+
+ // For the callee of the current function.
+ inline int32_t CalleeStackOffset() const;
+
+ inline int32_t SlotToStackOffset(int32_t slot) const;
+ inline int32_t StackOffsetToSlot(int32_t offset) const;
+
+ // For argument construction for calls. Argslots are Value-sized.
+ inline int32_t StackOffsetOfPassedArg(int32_t slot) const;
+
+ inline int32_t ToStackOffset(LAllocation a) const;
+ inline int32_t ToStackOffset(const LAllocation* a) const;
+
+ inline Address ToAddress(const LAllocation& a);
+ inline Address ToAddress(const LAllocation* a);
+
+ uint32_t frameSize() const {
+ return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
+ }
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void resetOsiPointRegs(LSafepoint* safepoint);
+ bool shouldVerifyOsiPointRegs(LSafepoint* safepoint);
+ void verifyOsiPointRegs(LSafepoint* safepoint);
+#endif
+
+ bool addNativeToBytecodeEntry(const BytecodeSite* site);
+ void dumpNativeToBytecodeEntries();
+ void dumpNativeToBytecodeEntry(uint32_t idx);
+
+ bool addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);
+ void extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);
+
+ public:
+ MIRGenerator& mirGen() const {
+ return *gen;
+ }
+
+    // When appending to runtimeData_, the vector might realloc, leaving
+    // pointers into the original vector stale and unusable. DataPtr acts like
+    // a pointer, but remains safe in the face of potentially realloc'ing
+    // vector appends.
+ friend class DataPtr;
+ template <typename T>
+ class DataPtr
+ {
+ CodeGeneratorShared* cg_;
+ size_t index_;
+
+ T* lookup() {
+ return reinterpret_cast<T*>(&cg_->runtimeData_[index_]);
+ }
+ public:
+ DataPtr(CodeGeneratorShared* cg, size_t index)
+ : cg_(cg), index_(index) { }
+
+ T * operator ->() {
+ return lookup();
+ }
+ T * operator*() {
+ return lookup();
+ }
+ };
+
+ protected:
+ MOZ_MUST_USE
+ bool allocateData(size_t size, size_t* offset) {
+ MOZ_ASSERT(size % sizeof(void*) == 0);
+ *offset = runtimeData_.length();
+ masm.propagateOOM(runtimeData_.appendN(0, size));
+ return !masm.oom();
+ }
+
+    // Allocate storage sized for the derived cache class while recording it
+    // as an IonCache. We only need the cache list at GC time. Everyone else
+    // can just use runtimeData offsets.
+ template <typename T>
+ inline size_t allocateCache(const T& cache) {
+ static_assert(mozilla::IsBaseOf<IonCache, T>::value, "T must inherit from IonCache");
+ size_t index;
+ masm.propagateOOM(allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
+ masm.propagateOOM(cacheList_.append(index));
+ if (masm.oom())
+ return SIZE_MAX;
+ // Use the copy constructor on the allocated space.
+ MOZ_ASSERT(index == cacheList_.back());
+ new (&runtimeData_[index]) T(cache);
+ return index;
+ }
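+
+    // Illustrative sketch of the intended allocateCache()/DataPtr pattern,
+    // using a hypothetical IonCache subclass "MyIC" and hypothetical lir and
+    // register names; beyond that, only names declared in this header are
+    // assumed:
+    //
+    //   size_t index = allocateCache(MyIC(object, output));
+    //   if (masm.oom())
+    //       return;
+    //   DataPtr<MyIC> ic(this, index);
+    //   // Each ic-> access re-reads runtimeData_, so the reference stays
+    //   // valid even if later allocateData() calls realloc the vector.
+    //   addCache(lir, index);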
+
+ protected:
+ // Encodes an LSnapshot into the compressed snapshot buffer.
+ void encode(LRecoverInfo* recover);
+ void encode(LSnapshot* snapshot);
+ void encodeAllocation(LSnapshot* snapshot, MDefinition* def, uint32_t* startIndex);
+
+ // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
+ // If the bailout table is full, this returns false, which is not a fatal
+ // error (the code generator may use a slower bailout mechanism).
+ bool assignBailoutId(LSnapshot* snapshot);
+
+ // Encode all encountered safepoints in CG-order, and resolve |indices| for
+ // safepoint offsets.
+ bool encodeSafepoints();
+
+ // Fixup offsets of native-to-bytecode map.
+ bool createNativeToBytecodeScriptList(JSContext* cx);
+ bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code);
+ void verifyCompactNativeToBytecodeMap(JitCode* code);
+
+ bool generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
+ IonTrackedTypeVector* allTypes);
+ void verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
+ const UniqueTrackedOptimizations& unique,
+ const IonTrackedTypeVector* allTypes);
+
+ // Mark the safepoint on |ins| as corresponding to the current assembler location.
+ // The location should be just after a call.
+ void markSafepoint(LInstruction* ins);
+ void markSafepointAt(uint32_t offset, LInstruction* ins);
+
+ // Mark the OSI point |ins| as corresponding to the current
+ // assembler location inside the |osiIndices_|. Return the assembler
+ // location for the OSI point return location.
+ uint32_t markOsiPoint(LOsiPoint* ins);
+
+ // Ensure that there is enough room between the last OSI point and the
+ // current instruction, such that:
+ // (1) Invalidation will not overwrite the current instruction, and
+ // (2) Overwriting the current instruction will not overwrite
+ // an invalidation marker.
+ void ensureOsiSpace();
+
+ OutOfLineCode* oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir);
+
+ void emitWasmCallBase(LWasmCallBase* ins);
+
+ void emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment);
+ void emitPreBarrier(Address address);
+
+ // We don't emit code for trivial blocks, so if we want to branch to the
+ // given block, and it's trivial, return the ultimate block we should
+ // actually branch directly to.
+ MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
+ while (block->lir()->isTrivial()) {
+ MOZ_ASSERT(block->lir()->rbegin()->numSuccessors() == 1);
+ block = block->lir()->rbegin()->getSuccessor(0);
+ }
+ return block;
+ }
+
+ // Test whether the given block can be reached via fallthrough from the
+ // current block.
+ inline bool isNextBlock(LBlock* block) {
+ uint32_t target = skipTrivialBlocks(block->mir())->id();
+ uint32_t i = current->mir()->id() + 1;
+ if (target < i)
+ return false;
+ // Trivial blocks can be crossed via fallthrough.
+ for (; i != target; ++i) {
+ if (!graph.getBlock(i)->isTrivial())
+ return false;
+ }
+ return true;
+ }
+
+ public:
+ // Save and restore all volatile registers to/from the stack, excluding the
+ // specified register(s), before a function call made using callWithABI and
+ // after storing the function call's return value to an output register.
+ // (The only registers that don't need to be saved/restored are 1) the
+ // temporary register used to store the return value of the function call,
+ // if there is one [otherwise that stored value would be overwritten]; and
+ // 2) temporary registers whose values aren't needed in the rest of the LIR
+ // instruction [this is purely an optimization]. All other volatiles must
+ // be saved and restored in case future LIR instructions need those values.)
+ void saveVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(LiveRegisterSet temps) {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void restoreVolatile(LiveRegisterSet temps) {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void saveVolatile() {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
+ void restoreVolatile() {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
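+
+    // Illustrative sketch of how these helpers bracket an ABI call, assuming
+    // the usual MacroAssembler ABI-call helpers and hypothetical registers
+    // input, temp and output, plus a hypothetical helper SomeHelper:
+    //
+    //   saveVolatile(output);
+    //   masm.setupUnalignedABICall(temp);
+    //   masm.passABIArg(input);
+    //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, SomeHelper));
+    //   masm.storeCallPointerResult(output);
+    //   restoreVolatile(output);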
+
+ // These functions have to be called before and after any callVM and before
+ // any modifications of the stack. Modification of the stack made after
+ // these calls should update the framePushed variable, needed by the exit
+ // frame produced by callVM.
+ inline void saveLive(LInstruction* ins);
+ inline void restoreLive(LInstruction* ins);
+ inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);
+
+ // Save/restore all registers that are both live and volatile.
+ inline void saveLiveVolatile(LInstruction* ins);
+ inline void restoreLiveVolatile(LInstruction* ins);
+
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ void storePointerResultTo(Register reg) {
+ masm.storeCallPointerResult(reg);
+ }
+
+ void storeFloatResultTo(FloatRegister reg) {
+ masm.storeCallFloatResult(reg);
+ }
+
+ template <typename T>
+ void storeResultValueTo(const T& t) {
+ masm.storeCallResultValue(t);
+ }
+
+ void callVM(const VMFunction& f, LInstruction* ins, const Register* dynStack = nullptr);
+
+ template <class ArgSeq, class StoreOutputTo>
+ inline OutOfLineCode* oolCallVM(const VMFunction& fun, LInstruction* ins, const ArgSeq& args,
+ const StoreOutputTo& out);
+
+ void addCache(LInstruction* lir, size_t cacheIndex);
+ bool addCacheLocations(const CacheLocationList& locs, size_t* numLocs, size_t* offset);
+ ReciprocalMulConstants computeDivisionConstants(uint32_t d, int maxLog);
+
+ protected:
+ bool generatePrologue();
+ bool generateEpilogue();
+
+ void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
+ void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
+ bool generateOutOfLineCode();
+
+ Label* getJumpLabelForBranch(MBasicBlock* block);
+
+ // Generate a jump to the start of the specified block, adding information
+ // if this is a loop backedge. Use this in place of jumping directly to
+ // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
+ // directly is needed.
+ void jumpToBlock(MBasicBlock* mir);
+
+    // Get a label for the start of the block which can be used for jumping, in
+ // place of jumpToBlock.
+ Label* labelForBackedgeWithImplicitCheck(MBasicBlock* mir);
+
+// This function is not used for MIPS. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+ void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
+#endif
+
+ template <class T>
+ wasm::TrapDesc trap(T* mir, wasm::Trap trap) {
+ return wasm::TrapDesc(mir->trapOffset(), trap, masm.framePushed());
+ }
+
+ private:
+ void generateInvalidateEpilogue();
+
+ public:
+ CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ template <class ArgSeq, class StoreOutputTo>
+ void visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool);
+
+ void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);
+
+ virtual void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool) {
+ MOZ_CRASH("NYI");
+ }
+
+ bool omitOverRecursedCheck() const;
+
+#ifdef JS_TRACE_LOGGING
+ protected:
+ void emitTracelogScript(bool isStart);
+ void emitTracelogTree(bool isStart, uint32_t textId);
+ void emitTracelogTree(bool isStart, const char* text, TraceLoggerTextId enabledTextId);
+
+ public:
+ void emitTracelogScriptStart() {
+ emitTracelogScript(/* isStart =*/ true);
+ }
+ void emitTracelogScriptStop() {
+ emitTracelogScript(/* isStart =*/ false);
+ }
+ void emitTracelogStartEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/ true, textId);
+ }
+ void emitTracelogStopEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/ false, textId);
+ }
+    // Log arbitrary text. The TraceLoggerTextId is used to toggle the
+    // logging on and off.
+    // Note: the text is not copied and needs to be kept alive until linking.
+ void emitTracelogStartEvent(const char* text, TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/ true, text, enabledTextId);
+ }
+ void emitTracelogStopEvent(const char* text, TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/ false, text, enabledTextId);
+ }
+#endif
+ void emitTracelogIonStart() {
+#ifdef JS_TRACE_LOGGING
+ emitTracelogScriptStart();
+ emitTracelogStartEvent(TraceLogger_IonMonkey);
+#endif
+ }
+ void emitTracelogIonStop() {
+#ifdef JS_TRACE_LOGGING
+ emitTracelogStopEvent(TraceLogger_IonMonkey);
+ emitTracelogScriptStop();
+#endif
+ }
+
+ protected:
+ inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
+ Scalar::Type type, Operand mem, LAllocation alloc);
+
+ public:
+ inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc);
+ inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
+ Operand mem, LAllocation alloc);
+
+ bool isGlobalObject(JSObject* object);
+};
+
+// An out-of-line path is generated at the end of the function.
+class OutOfLineCode : public TempObject
+{
+ Label entry_;
+ Label rejoin_;
+ uint32_t framePushed_;
+ const BytecodeSite* site_;
+
+ public:
+ OutOfLineCode()
+ : framePushed_(0),
+ site_()
+ { }
+
+ virtual void generate(CodeGeneratorShared* codegen) = 0;
+
+ Label* entry() {
+ return &entry_;
+ }
+ virtual void bind(MacroAssembler* masm) {
+ masm->bind(entry());
+ }
+ Label* rejoin() {
+ return &rejoin_;
+ }
+ void setFramePushed(uint32_t framePushed) {
+ framePushed_ = framePushed;
+ }
+ uint32_t framePushed() const {
+ return framePushed_;
+ }
+ void setBytecodeSite(const BytecodeSite* site) {
+ site_ = site;
+ }
+ const BytecodeSite* bytecodeSite() const {
+ return site_;
+ }
+ jsbytecode* pc() const {
+ return site_->pc();
+ }
+ JSScript* script() const {
+ return site_->script();
+ }
+};
+
+// For OOL paths that want a specific-typed code generator.
+template <typename T>
+class OutOfLineCodeBase : public OutOfLineCode
+{
+ public:
+ virtual void generate(CodeGeneratorShared* codegen) {
+ accept(static_cast<T*>(codegen));
+ }
+
+ public:
+ virtual void accept(T* codegen) = 0;
+};
+
+// ArgSeq stores arguments for OutOfLineCallVM.
+//
+// OutOfLineCallVM instances are created with the "oolCallVM" function. The
+// third argument of this function is an instance of a class which provides a
+// "generate" method in charge of pushing the arguments, with "pushArg", for a
+// VMFunction.
+//
+// Such a list of arguments can be created by using the "ArgList" function,
+// which creates one instance of "ArgSeq" with the argument types inferred
+// from the arguments themselves.
+//
+// The list of arguments must be written in the same order as if you were
+// calling the function in C++.
+//
+// Example:
+// ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))
+
+template <typename... ArgTypes>
+class ArgSeq;
+
+template <>
+class ArgSeq<>
+{
+ public:
+ ArgSeq() { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ }
+};
+
+template <typename HeadType, typename... TailTypes>
+class ArgSeq<HeadType, TailTypes...> : public ArgSeq<TailTypes...>
+{
+ private:
+ using RawHeadType = typename mozilla::RemoveReference<HeadType>::Type;
+ RawHeadType head_;
+
+ public:
+ template <typename ProvidedHead, typename... ProvidedTail>
+ explicit ArgSeq(ProvidedHead&& head, ProvidedTail&&... tail)
+ : ArgSeq<TailTypes...>(mozilla::Forward<ProvidedTail>(tail)...),
+ head_(mozilla::Forward<ProvidedHead>(head))
+ { }
+
+ // Arguments are pushed in reverse order, from last argument to first
+ // argument.
+ inline void generate(CodeGeneratorShared* codegen) const {
+ this->ArgSeq<TailTypes...>::generate(codegen);
+ codegen->pushArg(head_);
+ }
+};
+
+template <typename... ArgTypes>
+inline ArgSeq<ArgTypes...>
+ArgList(ArgTypes&&... args)
+{
+ return ArgSeq<ArgTypes...>(mozilla::Forward<ArgTypes>(args)...);
+}
+
+// Store wrappers, to generate the right move of data after the VM call.
+
+struct StoreNothing
+{
+ inline void generate(CodeGeneratorShared* codegen) const {
+ }
+ inline LiveRegisterSet clobbered() const {
+ return LiveRegisterSet(); // No register gets clobbered
+ }
+};
+
+class StoreRegisterTo
+{
+ private:
+ Register out_;
+
+ public:
+ explicit StoreRegisterTo(Register out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ // It's okay to use storePointerResultTo here - the VMFunction wrapper
+ // ensures the upper bytes are zero for bool/int32 return values.
+ codegen->storePointerResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+class StoreFloatRegisterTo
+{
+ private:
+ FloatRegister out_;
+
+ public:
+ explicit StoreFloatRegisterTo(FloatRegister out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ codegen->storeFloatResultTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+class StoreValueTo_
+{
+ private:
+ Output out_;
+
+ public:
+ explicit StoreValueTo_(const Output& out)
+ : out_(out)
+ { }
+
+ inline void generate(CodeGeneratorShared* codegen) const {
+ codegen->storeResultValueTo(out_);
+ }
+ inline LiveRegisterSet clobbered() const {
+ LiveRegisterSet set;
+ set.add(out_);
+ return set;
+ }
+};
+
+template <typename Output>
+StoreValueTo_<Output> StoreValueTo(const Output& out)
+{
+ return StoreValueTo_<Output>(out);
+}
+
+template <class ArgSeq, class StoreOutputTo>
+class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ private:
+ LInstruction* lir_;
+ const VMFunction& fun_;
+ ArgSeq args_;
+ StoreOutputTo out_;
+
+ public:
+ OutOfLineCallVM(LInstruction* lir, const VMFunction& fun, const ArgSeq& args,
+ const StoreOutputTo& out)
+ : lir_(lir),
+ fun_(fun),
+ args_(args),
+ out_(out)
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineCallVM(this);
+ }
+
+ LInstruction* lir() const { return lir_; }
+ const VMFunction& function() const { return fun_; }
+ const ArgSeq& args() const { return args_; }
+ const StoreOutputTo& out() const { return out_; }
+};
+
+template <class ArgSeq, class StoreOutputTo>
+inline OutOfLineCode*
+CodeGeneratorShared::oolCallVM(const VMFunction& fun, LInstruction* lir, const ArgSeq& args,
+ const StoreOutputTo& out)
+{
+ MOZ_ASSERT(lir->mirRaw());
+ MOZ_ASSERT(lir->mirRaw()->isInstruction());
+
+ OutOfLineCode* ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
+ addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
+ return ool;
+}
+
+template <class ArgSeq, class StoreOutputTo>
+void
+CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool)
+{
+ LInstruction* lir = ool->lir();
+
+ saveLive(lir);
+ ool->args().generate(this);
+ callVM(ool->function(), lir);
+ ool->out().generate(this);
+ restoreLiveIgnore(lir, ool->out().clobbered());
+ masm.jump(ool->rejoin());
+}
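+
+// Illustrative sketch of how a code generator typically uses oolCallVM,
+// assuming a hypothetical VMFunction info "SomeHelperInfo" and LIR node
+// "LSomething"; ArgList, StoreRegisterTo and the ool entry/rejoin labels are
+// as declared in this header:
+//
+//   void
+//   CodeGenerator::visitSomething(LSomething* lir)
+//   {
+//       Register obj = ToRegister(lir->object());
+//       Register output = ToRegister(lir->output());
+//       OutOfLineCode* ool = oolCallVM(SomeHelperInfo, lir, ArgList(obj),
+//                                      StoreRegisterTo(output));
+//       // Take the slow path only when the fast path fails, then rejoin.
+//       masm.branchTestPtr(Assembler::Zero, output, output, ool->entry());
+//       masm.bind(ool->rejoin());
+//   }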
+
+class OutOfLineWasmTruncateCheck : public OutOfLineCodeBase<CodeGeneratorShared>
+{
+ MIRType fromType_;
+ MIRType toType_;
+ FloatRegister input_;
+ bool isUnsigned_;
+ wasm::TrapOffset trapOffset_;
+
+ public:
+ OutOfLineWasmTruncateCheck(MWasmTruncateToInt32* mir, FloatRegister input)
+ : fromType_(mir->input()->type()), toType_(MIRType::Int32), input_(input),
+ isUnsigned_(mir->isUnsigned()), trapOffset_(mir->trapOffset())
+ { }
+
+ OutOfLineWasmTruncateCheck(MWasmTruncateToInt64* mir, FloatRegister input)
+ : fromType_(mir->input()->type()), toType_(MIRType::Int64), input_(input),
+ isUnsigned_(mir->isUnsigned()), trapOffset_(mir->trapOffset())
+ { }
+
+ void accept(CodeGeneratorShared* codegen) {
+ codegen->visitOutOfLineWasmTruncateCheck(this);
+ }
+
+ FloatRegister input() const { return input_; }
+ MIRType toType() const { return toType_; }
+ MIRType fromType() const { return fromType_; }
+ bool isUnsigned() const { return isUnsigned_; }
+ wasm::TrapOffset trapOffset() const { return trapOffset_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_h */
diff --git a/js/src/jit/shared/IonAssemblerBuffer.h b/js/src/jit/shared/IonAssemblerBuffer.h
new file mode 100644
index 000000000..cc20e26d2
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -0,0 +1,417 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBuffer_h
+#define jit_shared_IonAssemblerBuffer_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+// The offset into a buffer, in bytes.
+class BufferOffset
+{
+ int offset;
+
+ public:
+ friend BufferOffset nextOffset();
+
+ BufferOffset()
+ : offset(INT_MIN)
+ { }
+
+ explicit BufferOffset(int offset_)
+ : offset(offset_)
+ { }
+
+ explicit BufferOffset(Label* l)
+ : offset(l->offset())
+ { }
+
+ explicit BufferOffset(RepatchLabel* l)
+ : offset(l->offset())
+ { }
+
+ int getOffset() const { return offset; }
+ bool assigned() const { return offset != INT_MIN; }
+
+ // A BOffImm is a Branch Offset Immediate. It is an architecture-specific
+ // structure that holds the immediate for a pc relative branch. diffB takes
+ // the label for the destination of the branch, and encodes the immediate
+    // for the branch. This will need to be fixed up later, since a pool may be
+ // inserted between the branch and its destination.
+ template <class BOffImm>
+ BOffImm diffB(BufferOffset other) const {
+ if (!BOffImm::IsInRange(offset - other.offset))
+ return BOffImm();
+ return BOffImm(offset - other.offset);
+ }
+
+ template <class BOffImm>
+ BOffImm diffB(Label* other) const {
+ MOZ_ASSERT(other->bound());
+ if (!BOffImm::IsInRange(offset - other->offset()))
+ return BOffImm();
+ return BOffImm(offset - other->offset());
+ }
+};
+
+inline bool
+operator<(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() < b.getOffset();
+}
+
+inline bool
+operator>(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() > b.getOffset();
+}
+
+inline bool
+operator<=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() <= b.getOffset();
+}
+
+inline bool
+operator>=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() >= b.getOffset();
+}
+
+inline bool
+operator==(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() == b.getOffset();
+}
+
+inline bool
+operator!=(BufferOffset a, BufferOffset b)
+{
+ return a.getOffset() != b.getOffset();
+}
+
+template<int SliceSize>
+class BufferSlice
+{
+ protected:
+ BufferSlice<SliceSize>* prev_;
+ BufferSlice<SliceSize>* next_;
+
+ size_t bytelength_;
+
+ public:
+ mozilla::Array<uint8_t, SliceSize> instructions;
+
+ public:
+ explicit BufferSlice()
+ : prev_(nullptr), next_(nullptr), bytelength_(0)
+ { }
+
+ size_t length() const { return bytelength_; }
+ static inline size_t Capacity() { return SliceSize; }
+
+ BufferSlice* getNext() const { return next_; }
+ BufferSlice* getPrev() const { return prev_; }
+
+ void setNext(BufferSlice<SliceSize>* next) {
+ MOZ_ASSERT(next_ == nullptr);
+ MOZ_ASSERT(next->prev_ == nullptr);
+ next_ = next;
+ next->prev_ = this;
+ }
+
+ void putBytes(size_t numBytes, const void* source) {
+ MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
+ if (source)
+ memcpy(&instructions[length()], source, numBytes);
+ bytelength_ += numBytes;
+ }
+};
+
+template<int SliceSize, class Inst>
+class AssemblerBuffer
+{
+ protected:
+ typedef BufferSlice<SliceSize> Slice;
+ typedef AssemblerBuffer<SliceSize, Inst> AssemblerBuffer_;
+
+ // Doubly-linked list of BufferSlices, with the most recent in tail position.
+ Slice* head;
+ Slice* tail;
+
+ bool m_oom;
+ bool m_bail;
+
+    // How many bytes have been committed to the buffer thus far.
+    // Does not include the tail.
+ uint32_t bufferSize;
+
+ // Finger for speeding up accesses.
+ Slice* finger;
+ int finger_offset;
+
+ LifoAlloc lifoAlloc_;
+
+ public:
+ explicit AssemblerBuffer()
+ : head(nullptr),
+ tail(nullptr),
+ m_oom(false),
+ m_bail(false),
+ bufferSize(0),
+ finger(nullptr),
+ finger_offset(0),
+ lifoAlloc_(8192)
+ { }
+
+ public:
+ bool isAligned(size_t alignment) const {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ return !(size() & (alignment - 1));
+ }
+
+ protected:
+ virtual Slice* newSlice(LifoAlloc& a) {
+ Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
+ if (!tmp) {
+ fail_oom();
+ return nullptr;
+ }
+ return new (tmp) Slice;
+ }
+
+ public:
+ bool ensureSpace(size_t size) {
+ // Space can exist in the most recent Slice.
+ if (tail && tail->length() + size <= tail->Capacity()) {
+ // Simulate allocation failure even when we don't need a new slice.
+ if (js::oom::ShouldFailWithOOM())
+ return fail_oom();
+
+ return true;
+ }
+
+ // Otherwise, a new Slice must be added.
+ Slice* slice = newSlice(lifoAlloc_);
+ if (slice == nullptr)
+ return fail_oom();
+
+ // If this is the first Slice in the buffer, add to head position.
+ if (!head) {
+ head = slice;
+ finger = slice;
+ finger_offset = 0;
+ }
+
+ // Finish the last Slice and add the new Slice to the linked list.
+ if (tail) {
+ bufferSize += tail->length();
+ tail->setNext(slice);
+ }
+ tail = slice;
+
+ return true;
+ }
+
+ BufferOffset putByte(uint8_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putShort(uint16_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putInt(uint32_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ // Add numBytes bytes to this buffer.
+ // The data must fit in a single slice.
+ BufferOffset putBytes(size_t numBytes, const void* inst) {
+ if (!ensureSpace(numBytes))
+ return BufferOffset();
+
+ BufferOffset ret = nextOffset();
+ tail->putBytes(numBytes, inst);
+ return ret;
+ }
+
+ // Add a potentially large amount of data to this buffer.
+    // The data may be distributed across multiple slices.
+ // Return the buffer offset of the first added byte.
+ BufferOffset putBytesLarge(size_t numBytes, const void* data)
+ {
+ BufferOffset ret = nextOffset();
+ while (numBytes > 0) {
+ if (!ensureSpace(1))
+ return BufferOffset();
+ size_t avail = tail->Capacity() - tail->length();
+ size_t xfer = numBytes < avail ? numBytes : avail;
+ MOZ_ASSERT(xfer > 0, "ensureSpace should have allocated a slice");
+ tail->putBytes(xfer, data);
+ data = (const uint8_t*)data + xfer;
+ numBytes -= xfer;
+ }
+ return ret;
+ }
+
+ unsigned int size() const {
+ if (tail)
+ return bufferSize + tail->length();
+ return bufferSize;
+ }
+
+ bool oom() const { return m_oom || m_bail; }
+ bool bail() const { return m_bail; }
+
+ bool fail_oom() {
+ m_oom = true;
+ return false;
+ }
+ bool fail_bail() {
+ m_bail = true;
+ return false;
+ }
+
+ private:
+ void update_finger(Slice* finger_, int fingerOffset_) {
+ finger = finger_;
+ finger_offset = fingerOffset_;
+ }
+
+ static const unsigned SliceDistanceRequiringFingerUpdate = 3;
+
+ Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset;
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset >= cursor);
+
+ for (Slice *slice = start; slice != nullptr; slice = slice->getNext()) {
+ const int slicelen = slice->length();
+
+ // Is the offset within the bounds of this slice?
+ if (offset < cursor + slicelen) {
+ if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+ update_finger(slice, cursor);
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ cursor += slicelen;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset, bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset; // First (lowest) offset in the start Slice.
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset < int(cursor + start->length()));
+
+ for (Slice* slice = start; slice != nullptr; ) {
+ // Is the offset within the bounds of this slice?
+ if (offset >= cursor) {
+ if (updateFinger || slicesSkipped >= SliceDistanceRequiringFingerUpdate)
+ update_finger(slice, cursor);
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ // Move the cursor to the start of the previous slice.
+ Slice* prev = slice->getPrev();
+ cursor -= prev->length();
+
+ slice = prev;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ public:
+ Inst* getInstOrNull(BufferOffset off) {
+ if (!off.assigned())
+ return nullptr;
+ return getInst(off);
+ }
+
+ // Get a pointer to the instruction at offset |off| which must be within the
+ // bounds of the buffer. Use |getInstOrNull()| if |off| may be unassigned.
+ Inst* getInst(BufferOffset off) {
+ const int offset = off.getOffset();
+ MOZ_RELEASE_ASSERT(off.assigned() && offset >= 0 && (unsigned)offset < size());
+
+ // Is the instruction in the last slice?
+ if (offset >= int(bufferSize))
+ return (Inst*)&tail->instructions[offset - bufferSize];
+
+ // How close is this offset to the previous one we looked up?
+ // If it is sufficiently far from the start and end of the buffer,
+ // use the finger to start midway through the list.
+ int finger_dist = abs(offset - finger_offset);
+ if (finger_dist < Min(offset, int(bufferSize - offset))) {
+ if (finger_offset < offset)
+ return getInstForwards(off, finger, finger_offset, true);
+ return getInstBackwards(off, finger, finger_offset, true);
+ }
+
+ // Is the instruction closer to the start or to the end?
+ if (offset < int(bufferSize - offset))
+ return getInstForwards(off, head, 0);
+
+ // The last slice was already checked above, so start at the
+ // second-to-last.
+ Slice* prev = tail->getPrev();
+ return getInstBackwards(off, prev, bufferSize - prev->length());
+ }
+
+ BufferOffset nextOffset() const {
+ if (tail)
+ return BufferOffset(bufferSize + tail->length());
+ return BufferOffset(bufferSize);
+ }
+
+ class AssemblerBufferInstIterator
+ {
+ BufferOffset bo;
+ AssemblerBuffer_* m_buffer;
+
+ public:
+ explicit AssemblerBufferInstIterator(BufferOffset off, AssemblerBuffer_* buffer)
+ : bo(off), m_buffer(buffer)
+ { }
+
+ Inst* next() {
+ Inst* i = m_buffer->getInst(bo);
+ bo = BufferOffset(bo.getOffset() + i->size());
+ return cur();
+ }
+
+ Inst* cur() {
+ return m_buffer->getInst(bo);
+ }
+ };
+};
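+
+// Illustrative sketch of basic AssemblerBuffer usage, assuming a hypothetical
+// fixed-size Instruction type with a size() method:
+//
+//   AssemblerBuffer<4096, Instruction> buf;
+//   BufferOffset first = buf.putInt(0x11111111);
+//   buf.putInt(0x22222222);
+//   if (buf.oom())
+//       return;
+//   MOZ_ASSERT(buf.nextOffset().getOffset() == 8);
+//   Instruction* inst = buf.getInst(first);   // stable across later appends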
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBuffer_h
diff --git a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
new file mode 100644
index 000000000..74fa60b12
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -0,0 +1,1145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
+#define jit_shared_IonAssemblerBufferWithConstantPools_h
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include <algorithm>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+// This code extends the AssemblerBuffer to support the pooling of values loaded
+// using program-counter relative addressing modes. This is necessary with the
+// ARM instruction set because it has a fixed instruction size that can not
+// encode all values as immediate arguments in instructions. Pooling the values
+// allows the values to be placed in large chunks which minimizes the number of
+// forced branches around them in the code. This is used for loading floating
+// point constants, for loading 32 bit constants on the ARMv6, for absolute
+// branch targets, and in the future will be needed for large branches on the ARMv6.
+//
+// For simplicity of the implementation, the constant pools are always placed
+// after the loads referencing them. When a new constant pool load is added to
+// the assembler buffer, a corresponding pool entry is added to the current
+// pending pool. The finishPool() method copies the current pending pool entries
+// into the assembler buffer at the current offset and patches the pending
+// constant pool load instructions.
+//
+// Before inserting instructions or pool entries, it is necessary to determine
+// if doing so would place a pending pool entry out of reach of an instruction,
+// and if so then the pool must firstly be dumped. With the allocation algorithm
+// used below, the recalculation of all the distances between instructions and
+// their pool entries can be avoided by noting that there will be a limiting
+// instruction and pool entry pair that does not change when inserting more
+// instructions. Adding more instructions makes the same increase to the
+// distance, between instructions and their pool entries, for all such
+// pairs. This pair is recorded as the limiter, and it is updated when new pool
+// entries are added; see updateLimiter().
+//
+// The pools consist of: a guard instruction that branches around the pool, a
+// header word that helps identify a pool in the instruction stream, and then
+// the pool entries allocated in units of words. The guard instruction could be
+// omitted if control does not reach the pool, and this is referred to as a
+// natural guard below, but for simplicity the guard branch is always
+// emitted. The pool header is an identifiable word that in combination with the
+// guard uniquely identifies a pool in the instruction stream. The header also
+// encodes the pool size and a flag indicating if the guard is natural. It is
+// possible to iterate through the code instructions skipping or examining the
+// pools. E.g. it might be necessary to skip pools when searching for, or patching,
+// an instruction sequence.
+//
+// It is often required to keep a reference to a pool entry, to patch it after
+// the buffer is finished. Each pool entry is assigned a unique index, counting
+// up from zero (see the poolEntryCount slot below). These can be mapped back to
+// the offset of the pool entry in the finished buffer, see poolEntryOffset().
+//
+// The code supports no-pool regions, and for these the size of the region, in
+// instructions, must be supplied. This size is used to determine if inserting
+// the instructions would place a pool entry out of range, and if so then a pool
+// is firstly flushed. The DEBUG code checks that the emitted code is within the
+// supplied size to detect programming errors. See enterNoPool() and
+// leaveNoPool().
+
+// The only planned instruction sets that require inline constant pools are the
+// ARM, ARM64, and MIPS, and these all have fixed 32-bit sized instructions so
+// for simplicity the code below is specialized for fixed 32-bit sized
+// instructions and makes no attempt to support variable length
+// instructions. The base assembler buffer which supports variable width
+// instruction is used by the x86 and x64 backends.
+
+// The AssemblerBufferWithConstantPools template class uses static callbacks to
+// the provided Asm template argument class:
+//
+// void Asm::InsertIndexIntoTag(uint8_t* load_, uint32_t index)
+//
+// When allocEntry() is called to add a constant pool load with an associated
+// constant pool entry, this callback is called to encode the index of the
+// allocated constant pool entry into the load instruction.
+//
+// After the constant pool has been placed, PatchConstantPoolLoad() is called
+// to update the load instruction with the right load offset.
+//
+// void Asm::WritePoolGuard(BufferOffset branch,
+// Instruction* dest,
+// BufferOffset afterPool)
+//
+// Write out the constant pool guard branch before emitting the pool.
+//
+// branch
+// Offset of the guard branch in the buffer.
+//
+// dest
+// Pointer into the buffer where the guard branch should be emitted. (Same
+// as getInst(branch)). Space for guardSize_ instructions has been reserved.
+//
+// afterPool
+// Offset of the first instruction after the constant pool. This includes
+// both pool entries and branch veneers added after the pool data.
+//
+// void Asm::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
+//
+// Write out the pool header which follows the guard branch.
+//
+// void Asm::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+//
+// Re-encode a load of a constant pool entry after the location of the
+// constant pool is known.
+//
+// The load instruction at loadAddr was previously passed to
+// InsertIndexIntoTag(). The constPoolAddr is the final address of the
+// constant pool in the assembler buffer.
+//
+// void Asm::PatchShortRangeBranchToVeneer(AssemblerBufferWithConstantPools*,
+// unsigned rangeIdx,
+// BufferOffset deadline,
+// BufferOffset veneer)
+//
+// Patch a short-range branch to jump through a veneer before it goes out of
+// range.
+//
+// rangeIdx, deadline
+// These arguments were previously passed to registerBranchDeadline(). It is
+// assumed that PatchShortRangeBranchToVeneer() knows how to compute the
+// offset of the short-range branch from this information.
+//
+// veneer
+// Space for a branch veneer, guaranteed to be <= deadline. At this
+// position, guardSize_ * InstSize bytes are allocated. They should be
+// initialized to the proper unconditional branch instruction.
+//
+// Unbound branches to the same unbound label are organized as a linked list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Branch3 -> nil
+//
+// This callback should insert a new veneer branch into the list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Veneer -> Branch3 -> nil
+//
+// When Assembler::bind() rewrites the branches with the real label offset, it
+// probably has to bind Branch2 to target the veneer branch instead of jumping
+// straight to the label.
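+
+// Illustrative skeleton of an Asm callback provider matching the interface
+// documented above; the bodies are elided, "Instruction" stands for whatever
+// fixed-size instruction type the backend uses, and "MyBuffer" stands for the
+// concrete AssemblerBufferWithConstantPools instantiation:
+//
+//   class MyAsm
+//   {
+//     public:
+//       static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+//       static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+//       static void WritePoolGuard(BufferOffset branch, Instruction* dest,
+//                                  BufferOffset afterPool);
+//       static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+//       static void PatchShortRangeBranchToVeneer(MyBuffer* buffer,
+//                                                 unsigned rangeIdx,
+//                                                 BufferOffset deadline,
+//                                                 BufferOffset veneer);
+//   };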
+
+namespace js {
+namespace jit {
+
+// BranchDeadlineSet - Keep track of pending branch deadlines.
+//
+// Some architectures like arm and arm64 have branch instructions with limited
+// range. When assembling a forward branch, it is not always known if the final
+// target label will be in range of the branch instruction.
+//
+// The BranchDeadlineSet data structure is used to keep track of the set of
+// pending forward branches. It supports the following fast operations:
+//
+// 1. Get the earliest deadline in the set.
+// 2. Add a new branch deadline.
+// 3. Remove a branch deadline.
+//
+// Architectures may have different branch encodings with different ranges. Each
+// supported range is assigned a small integer starting at 0. This data
+// structure does not care about the actual range of branch instructions, just
+// the latest buffer offset that can be reached - the deadline offset.
+//
+// Branches are stored as (rangeIdx, deadline) tuples. The target-specific code
+// can compute the location of the branch itself from this information. This
+// data structure does not need to know.
+//
+template <unsigned NumRanges>
+class BranchDeadlineSet
+{
+ // Maintain a list of pending deadlines for each range separately.
+ //
+ // The offsets in each vector are always kept in ascending order.
+ //
+    // Because we have a separate vector for each range, as forward
+ // branches are added to the assembler buffer, their deadlines will
+ // always be appended to the vector corresponding to their range.
+ //
+ // When binding labels, we expect a more-or-less LIFO order of branch
+ // resolutions. This would always hold if we had strictly structured control
+ // flow.
+ //
+ // We allow branch deadlines to be added and removed in any order, but
+ // performance is best in the expected case of near LIFO order.
+ //
+ typedef Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> RangeVector;
+
+ // We really just want "RangeVector deadline_[NumRanges];", but each vector
+ // needs to be initialized with a LifoAlloc, and C++ doesn't bend that way.
+ //
+ // Use raw aligned storage instead and explicitly construct NumRanges
+ // vectors in our constructor.
+ mozilla::AlignedStorage2<RangeVector[NumRanges]> deadlineStorage_;
+
+ // Always access the range vectors through this method.
+ RangeVector& vectorForRange(unsigned rangeIdx)
+ {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ const RangeVector& vectorForRange(unsigned rangeIdx) const
+ {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ // Maintain a precomputed earliest deadline at all times.
+ // This is unassigned only when all deadline vectors are empty.
+ BufferOffset earliest_;
+
+ // The range vector owning earliest_. Uninitialized when empty.
+ unsigned earliestRange_;
+
+ // Recompute the earliest deadline after it's been invalidated.
+ void recomputeEarliest()
+ {
+ earliest_ = BufferOffset();
+ for (unsigned r = 0; r < NumRanges; r++) {
+ auto& vec = vectorForRange(r);
+ if (!vec.empty() && (!earliest_.assigned() || vec[0] < earliest_)) {
+ earliest_ = vec[0];
+ earliestRange_ = r;
+ }
+ }
+ }
+
+ // Update the earliest deadline if needed after inserting (rangeIdx,
+ // deadline). Always return true for convenience:
+ // return insert() && updateEarliest().
+ bool updateEarliest(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!earliest_.assigned() || deadline < earliest_) {
+ earliest_ = deadline;
+ earliestRange_ = rangeIdx;
+ }
+ return true;
+ }
+
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc)
+ {
+ // Manually construct vectors in the uninitialized aligned storage.
+ // This is because C++ arrays can otherwise only be constructed with
+ // the default constructor.
+ for (unsigned r = 0; r < NumRanges; r++)
+ new (&vectorForRange(r)) RangeVector(alloc);
+ }
+
+ ~BranchDeadlineSet()
+ {
+ // Aligned storage doesn't destruct its contents automatically.
+ for (unsigned r = 0; r < NumRanges; r++)
+ vectorForRange(r).~RangeVector();
+ }
+
+ // Is this set completely empty?
+ bool empty() const { return !earliest_.assigned(); }
+
+ // Get the total number of deadlines in the set.
+ size_t size() const
+ {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++)
+ count += vectorForRange(r).length();
+ return count;
+ }
+
+ // Get the number of deadlines for the range with the most elements.
+ size_t maxRangeSize() const
+ {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++)
+ count = std::max(count, vectorForRange(r).length());
+ return count;
+ }
+
+ // Get the first deadline that is still in the set.
+ BufferOffset earliestDeadline() const
+ {
+ MOZ_ASSERT(!empty());
+ return earliest_;
+ }
+
+    // Get the range index corresponding to earliestDeadline().
+ unsigned earliestDeadlineRange() const
+ {
+ MOZ_ASSERT(!empty());
+ return earliestRange_;
+ }
+
+ // Add a (rangeIdx, deadline) tuple to the set.
+ //
+ // It is assumed that this tuple is not already in the set.
+    // This function performs best if the added deadline is later than any
+ // existing deadline for the same range index.
+ //
+ // Return true if the tuple was added, false if the tuple could not be added
+ // because of an OOM error.
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ MOZ_ASSERT(deadline.assigned(), "Can only store assigned buffer offsets");
+ // This is the vector where deadline should be saved.
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Fast case: Simple append to the relevant array. This never affects
+ // the earliest deadline.
+ if (!vec.empty() && vec.back() < deadline)
+ return vec.append(deadline);
+
+ // Fast case: First entry to the vector. We need to update earliest_.
+ if (vec.empty())
+ return vec.append(deadline) && updateEarliest(rangeIdx, deadline);
+
+ return addDeadlineSlow(rangeIdx, deadline);
+ }
+
+ private:
+ // General case of addDeadline. This is split into two functions such that
+ // the common case in addDeadline can be inlined while this part probably
+ // won't inline.
+ bool addDeadlineSlow(unsigned rangeIdx, BufferOffset deadline)
+ {
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Inserting into the middle of the vector. Use a log time binary search
+ // and a linear time insert().
+ // Is it worthwhile special-casing the empty vector?
+ auto at = std::lower_bound(vec.begin(), vec.end(), deadline);
+ MOZ_ASSERT(at == vec.end() || *at != deadline, "Cannot insert duplicate deadlines");
+ return vec.insert(at, deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ public:
+ // Remove a deadline from the set.
+ // If (rangeIdx, deadline) is not in the set, nothing happens.
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ auto& vec = vectorForRange(rangeIdx);
+
+ if (vec.empty())
+ return;
+
+ if (deadline == vec.back()) {
+ // Expected fast case: Structured control flow causes forward
+ // branches to be bound in reverse order.
+ vec.popBack();
+ } else {
+ // Slow case: Binary search + linear erase.
+ auto where = std::lower_bound(vec.begin(), vec.end(), deadline);
+ if (where == vec.end() || *where != deadline)
+ return;
+ vec.erase(where);
+ }
+ if (deadline == earliest_)
+ recomputeEarliest();
+ }
+};
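+
+// Illustrative sketch of the BranchDeadlineSet protocol, with made-up offsets
+// and an assumed LifoAlloc "alloc":
+//
+//   BranchDeadlineSet<2> deadlines(alloc);
+//   deadlines.addDeadline(0, BufferOffset(128));   // short-range branch
+//   deadlines.addDeadline(1, BufferOffset(1024));  // longer-range branch
+//   MOZ_ASSERT(deadlines.earliestDeadline() == BufferOffset(128));
+//   MOZ_ASSERT(deadlines.earliestDeadlineRange() == 0);
+//   // Once the first branch is bound (or a veneer is emitted), remove it.
+//   deadlines.removeDeadline(0, BufferOffset(128));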
+
+// Specialization for architectures that don't need to track short-range
+// branches.
+template <>
+class BranchDeadlineSet<0u>
+{
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) {}
+ bool empty() const { return true; }
+ size_t size() const { return 0; }
+ size_t maxRangeSize() const { return 0; }
+ BufferOffset earliestDeadline() const { MOZ_CRASH(); }
+ unsigned earliestDeadlineRange() const { MOZ_CRASH(); }
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+};
+
+// The allocation unit size for pools.
+typedef int32_t PoolAllocUnit;
+
+// Hysteresis given to short-range branches.
+//
+// If any short-range branches will go out of range in the next N bytes,
+// generate a veneer for them in the current pool. The hysteresis prevents the
+// creation of many tiny constant pools for branch veneers.
+const size_t ShortRangeBranchHysteresis = 128;
+
+struct Pool
+{
+ private:
+    // The maximum program-counter relative offset that the instruction set
+    // can encode. Different classes of instructions might support different
+ // ranges but for simplicity the minimum is used here, and for the ARM this
+ // is constrained to 1024 by the float load instructions.
+ const size_t maxOffset_;
+ // An offset to apply to program-counter relative offsets. The ARM has a
+ // bias of 8.
+ const unsigned bias_;
+
+ // The content of the pool entries.
+ Vector<PoolAllocUnit, 8, LifoAllocPolicy<Fallible>> poolData_;
+
+ // Flag that tracks OOM conditions. This is set after any append failed.
+ bool oom_;
+
+ // The limiting instruction and pool-entry pair. The instruction program
+ // counter relative offset of this limiting instruction will go out of range
+ // first as the pool position moves forward. It is more efficient to track
+ // just this limiting pair than to recheck all offsets when testing if the
+ // pool needs to be dumped.
+ //
+ // 1. The actual offset of the limiting instruction referencing the limiting
+ // pool entry.
+ BufferOffset limitingUser;
+ // 2. The pool entry index of the limiting pool entry.
+ unsigned limitingUsee;
+
+ public:
+ // A record of the code offset of instructions that reference pool
+ // entries. These instructions need to be patched when the actual position
+ // of the instructions and pools are known, and for the code below this
+ // occurs when each pool is finished, see finishPool().
+ Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> loadOffsets;
+
+    // Create a Pool. Don't allocate anything from lifoAlloc; just capture its reference.
+ explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
+ : maxOffset_(maxOffset),
+ bias_(bias),
+ poolData_(lifoAlloc),
+ oom_(false),
+ limitingUser(),
+ limitingUsee(INT_MIN),
+ loadOffsets(lifoAlloc)
+ {
+ }
+
+ // If poolData() returns nullptr then oom_ will also be true.
+ const PoolAllocUnit* poolData() const {
+ return poolData_.begin();
+ }
+
+ unsigned numEntries() const {
+ return poolData_.length();
+ }
+
+ size_t getPoolSize() const {
+ return numEntries() * sizeof(PoolAllocUnit);
+ }
+
+ bool oom() const {
+ return oom_;
+ }
+
+ // Update the instruction/pool-entry pair that limits the position of the
+ // pool. The nextInst is the actual offset of the new instruction being
+ // allocated.
+ //
+ // This is comparing the offsets, see checkFull() below for the equation,
+ // but common expressions on both sides have been canceled from the ranges
+ // being compared. Notably, the poolOffset cancels out, so the limiting pair
+ // does not depend on where the pool is placed.
+ void updateLimiter(BufferOffset nextInst) {
+ ptrdiff_t oldRange = limitingUsee * sizeof(PoolAllocUnit) - limitingUser.getOffset();
+ ptrdiff_t newRange = getPoolSize() - nextInst.getOffset();
+ if (!limitingUser.assigned() || newRange > oldRange) {
+ // We have a new largest range!
+ limitingUser = nextInst;
+ limitingUsee = numEntries();
+ }
+ }
+
+ // Check if inserting a pool at the actual offset poolOffset would place
+ // pool entries out of reach. This is called before inserting instructions
+ // to check that doing so would not push pool entries out of reach, and if
+ // so then the pool would need to be firstly dumped. The poolOffset is the
+ // first word of the pool, after the guard and header and alignment fill.
+ bool checkFull(size_t poolOffset) const {
+ // Not full if there are no uses.
+ if (!limitingUser.assigned())
+ return false;
+ size_t offset = poolOffset + limitingUsee * sizeof(PoolAllocUnit)
+ - (limitingUser.getOffset() + bias_);
+ return offset >= maxOffset_;
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+
+ unsigned insertEntry(unsigned num, uint8_t* data, BufferOffset off, LifoAlloc& lifoAlloc) {
+ if (oom_)
+ return OOM_FAIL;
+ unsigned ret = numEntries();
+ if (!poolData_.append((PoolAllocUnit*)data, num) || !loadOffsets.append(off)) {
+ oom_ = true;
+ return OOM_FAIL;
+ }
+ return ret;
+ }
+
+ void reset() {
+ poolData_.clear();
+ loadOffsets.clear();
+
+ limitingUser = BufferOffset();
+ limitingUsee = -1;
+ }
+};
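+
+// Worked example of the checkFull() arithmetic above, with made-up numbers:
+// suppose maxOffset_ = 1024 and bias_ = 8, the limiting load was emitted at
+// buffer offset limitingUser = 100, and it references pool entry index
+// limitingUsee = 3. If the pool data would start at poolOffset = 1000, the
+// load-to-entry distance is
+//   1000 + 3 * sizeof(PoolAllocUnit) - (100 + 8) = 904,
+// which is still below maxOffset_, so the pool does not yet need to be dumped.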
+
+
+// Template arguments:
+//
+// SliceSize
+// Number of bytes in each allocated BufferSlice. See
+// AssemblerBuffer::SliceSize.
+//
+// InstSize
+// Size in bytes of the fixed-size instructions. This should be equal to
+// sizeof(Inst). This is only needed here because the buffer is defined before
+// the Instruction.
+//
+// Inst
+// The actual type used to represent instructions. This is only really used as
+// the return type of the getInst() method.
+//
+// Asm
+// Class defining the needed static callback functions. See documentation of
+// the Asm::* callbacks above.
+//
+// NumShortBranchRanges
+// The number of short branch ranges to support. This can be 0 if no support
+// for tracking short range branches is needed. The
+// AssemblerBufferWithConstantPools class does not need to know what the range
+// of branches is - it deals in branch 'deadlines' which is the last buffer
+// position that a short-range forward branch can reach. It is assumed that
+// the Asm class is able to find the actual branch instruction given a
+// (range-index, deadline) pair.
+//
+//
+template <size_t SliceSize, size_t InstSize, class Inst, class Asm,
+ unsigned NumShortBranchRanges = 0>
+struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst>
+{
+ private:
+ // The PoolEntry index counter. Each PoolEntry is given a unique index,
+ // counting up from zero, and these can be mapped back to the actual pool
+ // entry offset after finishing the buffer, see poolEntryOffset().
+ size_t poolEntryCount;
+
+ public:
+ class PoolEntry
+ {
+ size_t index_;
+
+ public:
+ explicit PoolEntry(size_t index)
+ : index_(index)
+ { }
+
+ PoolEntry()
+ : index_(-1)
+ { }
+
+ size_t index() const {
+ return index_;
+ }
+ };
+
+ private:
+ typedef AssemblerBuffer<SliceSize, Inst> Parent;
+ using typename Parent::Slice;
+
+ // The size of a pool guard, in instructions. A branch around the pool.
+ const unsigned guardSize_;
+ // The size of the header that is put at the beginning of a full pool, in
+ // instruction sized units.
+ const unsigned headerSize_;
+
+ // The maximum pc relative offset encoded in instructions that reference
+ // pool entries. This is generally set to the maximum offset that can be
+ // encoded by the instructions, but for testing can be lowered to affect the
+ // pool placement and frequency of pool placement.
+ const size_t poolMaxOffset_;
+
+ // The bias on pc relative addressing mode offsets, in units of bytes. The
+ // ARM has a bias of 8 bytes.
+ const unsigned pcBias_;
+
+ // The current working pool. Copied out as needed before resetting.
+ Pool pool_;
+
+ // The buffer should be aligned to this address.
+ const size_t instBufferAlign_;
+
+ struct PoolInfo {
+ // The index of the first entry in this pool.
+ // Pool entries are numbered uniquely across all pools, starting from 0.
+ unsigned firstEntryIndex;
+
+ // The location of this pool's first entry in the main assembler buffer.
+ // Note that the pool guard and header come before this offset which
+ // points directly at the data.
+ BufferOffset offset;
+
+ explicit PoolInfo(unsigned index, BufferOffset data)
+ : firstEntryIndex(index)
+ , offset(data)
+ {
+ }
+ };
+
+ // Info for each pool that has already been dumped. This does not include
+ // any entries in pool_.
+ Vector<PoolInfo, 8, LifoAllocPolicy<Fallible>> poolInfo_;
+
+ // Set of short-range forward branches that have not yet been bound.
+ // We may need to insert veneers if the final label turns out to be out of
+ // range.
+ //
+ // This set stores (rangeIdx, deadline) pairs instead of the actual branch
+ // locations.
+ BranchDeadlineSet<NumShortBranchRanges> branchDeadlines_;
+
+    // When true, dumping pools is inhibited.
+ bool canNotPlacePool_;
+
+#ifdef DEBUG
+ // State for validating the 'maxInst' argument to enterNoPool().
+ // The buffer offset when entering the no-pool region.
+ size_t canNotPlacePoolStartOffset_;
+ // The maximum number of word sized instructions declared for the no-pool
+ // region.
+ size_t canNotPlacePoolMaxInst_;
+#endif
+
+ // Instruction to use for alignment fill.
+ const uint32_t alignFillInst_;
+
+ // Insert a number of NOP instructions between each requested instruction at
+ // all locations at which a pool can potentially spill. This is useful for
+ // checking that instruction locations are correctly referenced and/or
+ // followed.
+ const uint32_t nopFillInst_;
+ const unsigned nopFill_;
+    // When true, the insertion of fill NOPs is inhibited; set while the fill
+    // or alignment padding is itself being emitted.
+ bool inhibitNops_;
+
+ public:
+ // A unique id within each JitContext, to identify pools in the debug
+ // spew. Set by the MacroAssembler, see getNextAssemblerId().
+ int id;
+
+ private:
+ // The buffer slices are in a double linked list.
+ Slice* getHead() const {
+ return this->head;
+ }
+ Slice* getTail() const {
+ return this->tail;
+ }
+
+ public:
+ // Create an assembler buffer.
+ // Note that this constructor is not allowed to actually allocate memory from this->lifoAlloc_
+ // because the MacroAssembler constructor has not yet created an AutoJitContextAlloc.
+ AssemblerBufferWithConstantPools(unsigned guardSize, unsigned headerSize,
+ size_t instBufferAlign, size_t poolMaxOffset,
+ unsigned pcBias, uint32_t alignFillInst, uint32_t nopFillInst,
+ unsigned nopFill = 0)
+ : poolEntryCount(0),
+ guardSize_(guardSize),
+ headerSize_(headerSize),
+ poolMaxOffset_(poolMaxOffset),
+ pcBias_(pcBias),
+ pool_(poolMaxOffset, pcBias, this->lifoAlloc_),
+ instBufferAlign_(instBufferAlign),
+ poolInfo_(this->lifoAlloc_),
+ branchDeadlines_(this->lifoAlloc_),
+ canNotPlacePool_(false),
+#ifdef DEBUG
+ canNotPlacePoolStartOffset_(0),
+ canNotPlacePoolMaxInst_(0),
+#endif
+ alignFillInst_(alignFillInst),
+ nopFillInst_(nopFillInst),
+ nopFill_(nopFill),
+ inhibitNops_(false),
+ id(-1)
+ { }
+
+ // We need to wait until an AutoJitContextAlloc is created by the
+ // MacroAssembler before allocating any space.
+ void initWithAllocator() {
+ // We hand out references to lifoAlloc_ in the constructor.
+ // Check that no allocations were made then.
+ MOZ_ASSERT(this->lifoAlloc_.isEmpty(), "Illegal LIFO allocations before AutoJitContextAlloc");
+ }
+
+ private:
+ size_t sizeExcludingCurrentPool() const {
+ // Return the actual size of the buffer, excluding the current pending
+ // pool.
+ return this->nextOffset().getOffset();
+ }
+
+ public:
+ size_t size() const {
+ // Return the current actual size of the buffer. This is only accurate
+ // if there are no pending pool entries to dump, check.
+ MOZ_ASSERT_IF(!this->oom(), pool_.numEntries() == 0);
+ return sizeExcludingCurrentPool();
+ }
+
+ private:
+ void insertNopFill() {
+ // Insert fill for testing.
+ if (nopFill_ > 0 && !inhibitNops_ && !canNotPlacePool_) {
+ inhibitNops_ = true;
+
+ // Fill using a branch-nop rather than a NOP so this can be
+ // distinguished and skipped.
+ for (size_t i = 0; i < nopFill_; i++)
+ putInt(nopFillInst_);
+
+ inhibitNops_ = false;
+ }
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+ static const unsigned DUMMY_INDEX = unsigned(-2);
+
+ // Check if it is possible to add numInst instructions and numPoolEntries
+ // constant pool entries without needing to flush the current pool.
+ bool hasSpaceForInsts(unsigned numInsts, unsigned numPoolEntries) const
+ {
+ size_t nextOffset = sizeExcludingCurrentPool();
+ // Earliest starting offset for the current pool after adding numInsts.
+ // This is the beginning of the pool entries proper, after inserting a
+ // guard branch + pool header.
+ size_t poolOffset = nextOffset + (numInsts + guardSize_ + headerSize_) * InstSize;
+
+ // Any constant pool loads that would go out of range?
+ if (pool_.checkFull(poolOffset))
+ return false;
+
+ // Any short-range branch that would go out of range?
+ if (!branchDeadlines_.empty()) {
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ size_t poolEnd =
+ poolOffset + pool_.getPoolSize() + numPoolEntries * sizeof(PoolAllocUnit);
+
+            // When NumShortBranchRanges > 1, it is possible for branch deadlines to expire faster
+ // than we can insert veneers. Suppose branches are 4 bytes each, we could have the
+ // following deadline set:
+ //
+ // Range 0: 40, 44, 48
+ // Range 1: 44, 48
+ //
+ // It is not good enough to start inserting veneers at the 40 deadline; we would not be
+ // able to create veneers for the second 44 deadline. Instead, we need to start at 32:
+ //
+ // 32: veneer(40)
+ // 36: veneer(44)
+ // 40: veneer(44)
+ // 44: veneer(48)
+ // 48: veneer(48)
+ //
+ // This is a pretty conservative solution to the problem: If we begin at the earliest
+ // deadline, we can always emit all veneers for the range that currently has the most
+ // pending deadlines. That may not leave room for veneers for the remaining ranges, so
+ // reserve space for those secondary range veneers assuming the worst case deadlines.
+
+ // Total pending secondary range veneer size.
+ size_t secondaryVeneers =
+ guardSize_ * (branchDeadlines_.size() - branchDeadlines_.maxRangeSize());
+
+ if (deadline < poolEnd + secondaryVeneers)
+ return false;
+ }
+
+ return true;
+ }
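+    // Worked check of the reservation above (an illustrative sketch, not part
+    // of the original logic): with the deadline sets shown in the comment,
+    // branchDeadlines_.size() == 5 and branchDeadlines_.maxRangeSize() == 3, so
+    //
+    //   secondaryVeneers = guardSize_ * (5 - 3) = 2 * guardSize_
+    //
+    // i.e. space for the two range-1 veneers that cannot be emitted while the
+    // three range-0 veneers are being placed is reserved up front, and
+    // hasSpaceForInsts() reports no space (forcing a flush) whenever
+    // deadline < poolEnd + secondaryVeneers.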
+
+ unsigned insertEntryForwards(unsigned numInst, unsigned numPoolEntries, uint8_t* inst, uint8_t* data) {
+ // If inserting pool entries then find a new limiter before we do the
+ // range check.
+ if (numPoolEntries)
+ pool_.updateLimiter(BufferOffset(sizeExcludingCurrentPool()));
+
+ if (!hasSpaceForInsts(numInst, numPoolEntries)) {
+ if (numPoolEntries)
+ JitSpew(JitSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
+ else
+ JitSpew(JitSpew_Pools, "[%d] Inserting instruction(%" PRIuSIZE ") caused a spill", id,
+ sizeExcludingCurrentPool());
+
+ finishPool();
+ if (this->oom())
+ return OOM_FAIL;
+ return insertEntryForwards(numInst, numPoolEntries, inst, data);
+ }
+ if (numPoolEntries) {
+ unsigned result = pool_.insertEntry(numPoolEntries, data, this->nextOffset(), this->lifoAlloc_);
+ if (result == Pool::OOM_FAIL) {
+ this->fail_oom();
+ return OOM_FAIL;
+ }
+ return result;
+ }
+
+ // The pool entry index is returned above when allocating an entry, but
+ // when not allocating an entry a dummy value is returned - it is not
+ // expected to be used by the caller.
+ return DUMMY_INDEX;
+ }
+
+ public:
+ // Get the next buffer offset where an instruction would be inserted.
+ // This may flush the current constant pool before returning nextOffset().
+ BufferOffset nextInstrOffset()
+ {
+ if (!hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
+ JitSpew(JitSpew_Pools, "[%d] nextInstrOffset @ %d caused a constant pool spill", id,
+ this->nextOffset().getOffset());
+ finishPool();
+ }
+ return this->nextOffset();
+ }
+
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data, PoolEntry* pe = nullptr,
+ bool markAsBranch = false)
+ {
+ // The allocation of pool entries is not supported in a no-pool region,
+ // check.
+ MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
+
+ if (this->oom() && !this->bail())
+ return BufferOffset();
+
+ insertNopFill();
+
+#ifdef JS_JITSPEW
+ if (numPoolEntries && JitSpewEnabled(JitSpew_Pools)) {
+ JitSpew(JitSpew_Pools, "[%d] Inserting %d entries into pool", id, numPoolEntries);
+ JitSpewStart(JitSpew_Pools, "[%d] data is: 0x", id);
+ size_t length = numPoolEntries * sizeof(PoolAllocUnit);
+ for (unsigned idx = 0; idx < length; idx++) {
+ JitSpewCont(JitSpew_Pools, "%02x", data[length - idx - 1]);
+ if (((idx & 3) == 3) && (idx + 1 != length))
+ JitSpewCont(JitSpew_Pools, "_");
+ }
+ JitSpewFin(JitSpew_Pools);
+ }
+#endif
+
+ // Insert the pool value.
+ unsigned index = insertEntryForwards(numInst, numPoolEntries, inst, data);
+ if (this->oom())
+ return BufferOffset();
+
+ // Now to get an instruction to write.
+ PoolEntry retPE;
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "[%d] Entry has index %u, offset %" PRIuSIZE, id, index,
+ sizeExcludingCurrentPool());
+ Asm::InsertIndexIntoTag(inst, index);
+ // Figure out the offset within the pool entries.
+ retPE = PoolEntry(poolEntryCount);
+ poolEntryCount += numPoolEntries;
+ }
+ // Now inst is a valid thing to insert into the instruction stream.
+ if (pe != nullptr)
+ *pe = retPE;
+ return this->putBytes(numInst * InstSize, inst);
+ }
+
+ BufferOffset putInt(uint32_t value, bool markAsBranch = false) {
+ return allocEntry(1, 0, (uint8_t*)&value, nullptr, nullptr, markAsBranch);
+ }
+
+ // Register a short-range branch deadline.
+ //
+ // After inserting a short-range forward branch, call this method to
+ // register the branch 'deadline' which is the last buffer offset that the
+ // branch instruction can reach.
+ //
+ // When the branch is bound to a destination label, call
+    // unregisterBranchDeadline() to stop tracking this branch.
+ //
+ // If the assembled code is about to exceed the registered branch deadline,
+ // and unregisterBranchDeadline() has not yet been called, an
+ // instruction-sized constant pool entry is allocated before the branch
+ // deadline.
+ //
+ // rangeIdx
+ // A number < NumShortBranchRanges identifying the range of the branch.
+ //
+    // deadline
+    //   The highest buffer offset that the short-range branch can reach
+    //   directly.
+ //
+ void registerBranchDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!this->oom() && !branchDeadlines_.addDeadline(rangeIdx, deadline))
+ this->fail_oom();
+ }
+
+ // Un-register a short-range branch deadline.
+ //
+ // When a short-range branch has been successfully bound to its destination
+    // label, call this function to stop tracking the branch.
+ //
+ // The (rangeIdx, deadline) pair must be previously registered.
+ //
+ void unregisterBranchDeadline(unsigned rangeIdx, BufferOffset deadline)
+ {
+ if (!this->oom())
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+ }
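+    // A minimal usage sketch (hypothetical backend code; emitShortBranch() and
+    // MaxShortRange are assumed names, not part of this header): register the
+    // deadline right after emitting a short-range branch, and unregister it
+    // once the branch is bound directly to its target.
+    //
+    //   BufferOffset br = emitShortBranch(target);
+    //   buffer.registerBranchDeadline(rangeIdx,
+    //                                 BufferOffset(br.getOffset() + MaxShortRange));
+    //   ...
+    //   buffer.unregisterBranchDeadline(rangeIdx,
+    //                                   BufferOffset(br.getOffset() + MaxShortRange));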
+
+ private:
+ // Are any short-range branches about to expire?
+ bool hasExpirableShortRangeBranches() const
+ {
+ if (branchDeadlines_.empty())
+ return false;
+
+ // Include branches that would expire in the next N bytes.
+ // The hysteresis avoids the needless creation of many tiny constant
+ // pools.
+ return this->nextOffset().getOffset() + ShortRangeBranchHysteresis >
+ size_t(branchDeadlines_.earliestDeadline().getOffset());
+ }
+
+ void finishPool() {
+ JitSpew(JitSpew_Pools, "[%d] Attempting to finish pool %" PRIuSIZE " with %u entries.",
+ id, poolInfo_.length(), pool_.numEntries());
+
+ if (pool_.numEntries() == 0 && !hasExpirableShortRangeBranches()) {
+ // If there is no data in the pool being dumped, don't dump anything.
+ JitSpew(JitSpew_Pools, "[%d] Aborting because the pool is empty", id);
+ return;
+ }
+
+ // Should not be placing a pool in a no-pool region, check.
+ MOZ_ASSERT(!canNotPlacePool_);
+
+ // Dump the pool with a guard branch around the pool.
+ BufferOffset guard = this->putBytes(guardSize_ * InstSize, nullptr);
+ BufferOffset header = this->putBytes(headerSize_ * InstSize, nullptr);
+ BufferOffset data =
+ this->putBytesLarge(pool_.getPoolSize(), (const uint8_t*)pool_.poolData());
+ if (this->oom())
+ return;
+
+ // Now generate branch veneers for any short-range branches that are
+ // about to expire.
+ while (hasExpirableShortRangeBranches()) {
+ unsigned rangeIdx = branchDeadlines_.earliestDeadlineRange();
+ BufferOffset deadline = branchDeadlines_.earliestDeadline();
+
+ // Stop tracking this branch. The Asm callback below may register
+ // new branches to track.
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+
+ // Make room for the veneer. Same as a pool guard branch.
+ BufferOffset veneer = this->putBytes(guardSize_ * InstSize, nullptr);
+ if (this->oom())
+ return;
+
+ // Fix the branch so it targets the veneer.
+ // The Asm class knows how to find the original branch given the
+ // (rangeIdx, deadline) pair.
+ Asm::PatchShortRangeBranchToVeneer(this, rangeIdx, deadline, veneer);
+ }
+
+ // We only reserved space for the guard branch and pool header.
+ // Fill them in.
+ BufferOffset afterPool = this->nextOffset();
+ Asm::WritePoolGuard(guard, this->getInst(guard), afterPool);
+ Asm::WritePoolHeader((uint8_t*)this->getInst(header), &pool_, false);
+
+ // With the pool's final position determined it is now possible to patch
+ // the instructions that reference entries in this pool, and this is
+ // done incrementally as each pool is finished.
+ size_t poolOffset = data.getOffset();
+
+ unsigned idx = 0;
+ for (BufferOffset* iter = pool_.loadOffsets.begin();
+ iter != pool_.loadOffsets.end();
+ ++iter, ++idx)
+ {
+ // All entries should be before the pool.
+ MOZ_ASSERT(iter->getOffset() < guard.getOffset());
+
+ // Everything here is known so we can safely do the necessary
+ // substitutions.
+ Inst* inst = this->getInst(*iter);
+ size_t codeOffset = poolOffset - iter->getOffset();
+
+ // That is, PatchConstantPoolLoad wants to be handed the address of
+ // the pool entry that is being loaded. We need to do a non-trivial
+ // amount of math here, since the pool that we've made does not
+ // actually reside there in memory.
+ JitSpew(JitSpew_Pools, "[%d] Fixing entry %d offset to %" PRIuSIZE, id, idx, codeOffset);
+ Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset);
+ }
+
+ // Record the pool info.
+ unsigned firstEntry = poolEntryCount - pool_.numEntries();
+ if (!poolInfo_.append(PoolInfo(firstEntry, data))) {
+ this->fail_oom();
+ return;
+ }
+
+ // Reset everything to the state that it was in when we started.
+ pool_.reset();
+ }
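+    // The patch arithmetic above, with illustrative numbers (a sketch only):
+    // if the pool data lands at buffer offset 0x120 and a load that references
+    // this pool sits at offset 0x100, then
+    //
+    //   codeOffset = 0x120 - 0x100 = 0x20
+    //
+    // and PatchConstantPoolLoad() is handed (uint8_t*)inst + 0x20, i.e. where
+    // the start of the pool data sits relative to that load once the buffer is
+    // copied into its final location.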
+
+ public:
+ void flushPool() {
+ if (this->oom())
+ return;
+ JitSpew(JitSpew_Pools, "[%d] Requesting a pool flush", id);
+ finishPool();
+ }
+
+ void enterNoPool(size_t maxInst) {
+ // Don't allow re-entry.
+ MOZ_ASSERT(!canNotPlacePool_);
+ insertNopFill();
+
+ // Check if the pool will spill by adding maxInst instructions, and if
+ // so then finish the pool before entering the no-pool region. It is
+ // assumed that no pool entries are allocated in a no-pool region and
+ // this is asserted when allocating entries.
+ if (!hasSpaceForInsts(maxInst, 0)) {
+ JitSpew(JitSpew_Pools, "[%d] No-Pool instruction(%" PRIuSIZE ") caused a spill.", id,
+ sizeExcludingCurrentPool());
+ finishPool();
+ }
+
+#ifdef DEBUG
+ // Record the buffer position to allow validating maxInst when leaving
+ // the region.
+ canNotPlacePoolStartOffset_ = this->nextOffset().getOffset();
+ canNotPlacePoolMaxInst_ = maxInst;
+#endif
+
+ canNotPlacePool_ = true;
+ }
+
+ void leaveNoPool() {
+ MOZ_ASSERT(canNotPlacePool_);
+ canNotPlacePool_ = false;
+
+ // Validate the maxInst argument supplied to enterNoPool().
+ MOZ_ASSERT(this->nextOffset().getOffset() - canNotPlacePoolStartOffset_ <= canNotPlacePoolMaxInst_ * InstSize);
+ }
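+    // Typical pairing (an illustrative sketch; the emitters are assumed names):
+    // a backend that must emit a fixed-shape sequence, such as a patchable
+    // call, brackets it so no pool can be dumped in the middle.
+    //
+    //   buffer.enterNoPool(/* maxInst = */ 3);
+    //   emitFirstInst();
+    //   emitSecondInst();
+    //   emitThirdInst();
+    //   buffer.leaveNoPool();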
+
+ void align(unsigned alignment) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ MOZ_ASSERT(alignment >= InstSize);
+
+        // A pool may need to be dumped at this point, so insert NOP fill here.
+ insertNopFill();
+
+ // Check if the code position can be aligned without dumping a pool.
+ unsigned requiredFill = sizeExcludingCurrentPool() & (alignment - 1);
+ if (requiredFill == 0)
+ return;
+ requiredFill = alignment - requiredFill;
+
+ // Add an InstSize because it is probably not useful for a pool to be
+ // dumped at the aligned code position.
+ if (!hasSpaceForInsts(requiredFill / InstSize + 1, 0)) {
+ // Alignment would cause a pool dump, so dump the pool now.
+ JitSpew(JitSpew_Pools, "[%d] Alignment of %d at %" PRIuSIZE " caused a spill.",
+ id, alignment, sizeExcludingCurrentPool());
+ finishPool();
+ }
+
+ inhibitNops_ = true;
+ while ((sizeExcludingCurrentPool() & (alignment - 1)) && !this->oom())
+ putInt(alignFillInst_);
+ inhibitNops_ = false;
+ }
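+    // Alignment arithmetic worked through (illustrative numbers): with
+    // InstSize == 4, alignment == 16 and sizeExcludingCurrentPool() == 44,
+    //
+    //   requiredFill = 44 & (16 - 1) = 12    // bytes past the last boundary
+    //   requiredFill = 16 - 12 = 4           // bytes of fill still needed
+    //
+    // so a single alignFillInst_ word is emitted, after first checking that
+    // requiredFill / InstSize + 1 == 2 instructions fit without a pool spill.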
+
+ public:
+ void executableCopy(uint8_t* dest) {
+ if (this->oom())
+ return;
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
+ memcpy(dest, &cur->instructions[0], cur->length());
+ dest += cur->length();
+ }
+ }
+
+ bool appendBuffer(const AssemblerBufferWithConstantPools& other) {
+ if (this->oom())
+ return false;
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = other.getHead(); cur != nullptr; cur = cur->getNext()) {
+ this->putBytes(cur->length(), &cur->instructions[0]);
+ if (this->oom())
+ return false;
+ }
+ return true;
+ }
+
+ public:
+ size_t poolEntryOffset(PoolEntry pe) const {
+ MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),
+ "Invalid pool entry, or not flushed yet.");
+ // Find the pool containing pe.index().
+ // The array is sorted, so we can use a binary search.
+ auto b = poolInfo_.begin(), e = poolInfo_.end();
+ // A note on asymmetric types in the upper_bound comparator:
+ // http://permalink.gmane.org/gmane.comp.compilers.clang.devel/10101
+ auto i = std::upper_bound(b, e, pe.index(), [](size_t value, const PoolInfo& entry) {
+ return value < entry.firstEntryIndex;
+ });
+ // Since upper_bound finds the first pool greater than pe,
+ // we want the previous one which is the last one less than or equal.
+ MOZ_ASSERT(i != b, "PoolInfo not sorted or empty?");
+ --i;
+ // The i iterator now points to the pool containing pe.index.
+ MOZ_ASSERT(i->firstEntryIndex <= pe.index() &&
+ (i + 1 == e || (i + 1)->firstEntryIndex > pe.index()));
+ // Compute the byte offset into the pool.
+ unsigned relativeIndex = pe.index() - i->firstEntryIndex;
+ return i->offset.getOffset() + relativeIndex * sizeof(PoolAllocUnit);
+ }
+};
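+// A self-contained sketch of the upper_bound pattern used by
+// AssemblerBufferWithConstantPools::poolEntryOffset() above (illustrative
+// types and values; not part of this header). The comparator deliberately
+// mixes types, which std::upper_bound permits:
+//
+//   struct Info { unsigned firstEntryIndex; };
+//   std::vector<Info> pools = {{0}, {4}, {9}};
+//   size_t index = 6;
+//   auto it = std::upper_bound(pools.begin(), pools.end(), index,
+//                              [](size_t value, const Info& entry) {
+//                                  return value < entry.firstEntryIndex;
+//                              });
+//   --it;   // 'it' now points at {4}, the pool containing entry 6.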
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBufferWithConstantPools_h
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
new file mode 100644
index 000000000..a352f5d8a
--- /dev/null
+++ b/js/src/jit/shared/LIR-shared.h
@@ -0,0 +1,8904 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LIR_shared_h
+#define jit_shared_LIR_shared_h
+
+#include "jsutil.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/shared/Assembler-shared.h"
+
+// This file declares LIR instructions that are common to every platform.
+
+namespace js {
+namespace jit {
+
+class LBox : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(Box);
+
+ LBox(const LAllocation& payload, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, payload);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LBinaryMath : public LInstructionHelper<1, 2 + ExtraUses, Temps>
+{
+ public:
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+};
+
+// An LOsiPoint captures a snapshot after a call and ensures enough space to
+// patch in a call to the invalidation mechanism.
+//
+// Note: LSafepoints are 1:1 with LOsiPoints, so each LOsiPoint holds a
+// reference to its corresponding LSafepoint to inform it of the LOsiPoint's
+// masm offset when it gets CG'd.
+class LOsiPoint : public LInstructionHelper<0, 0, 0>
+{
+ LSafepoint* safepoint_;
+
+ public:
+ LOsiPoint(LSafepoint* safepoint, LSnapshot* snapshot)
+ : safepoint_(safepoint)
+ {
+ MOZ_ASSERT(safepoint && snapshot);
+ assignSnapshot(snapshot);
+ }
+
+ LSafepoint* associatedSafepoint() {
+ return safepoint_;
+ }
+
+ LIR_HEADER(OsiPoint)
+};
+
+class LMove
+{
+ LAllocation from_;
+ LAllocation to_;
+ LDefinition::Type type_;
+
+ public:
+ LMove(LAllocation from, LAllocation to, LDefinition::Type type)
+ : from_(from),
+ to_(to),
+ type_(type)
+ { }
+
+ LAllocation from() const {
+ return from_;
+ }
+ LAllocation to() const {
+ return to_;
+ }
+ LDefinition::Type type() const {
+ return type_;
+ }
+};
+
+class LMoveGroup : public LInstructionHelper<0, 0, 0>
+{
+ js::Vector<LMove, 2, JitAllocPolicy> moves_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional general register available for use when executing moves.
+ LAllocation scratchRegister_;
+#endif
+
+ explicit LMoveGroup(TempAllocator& alloc)
+ : moves_(alloc)
+ { }
+
+ public:
+ LIR_HEADER(MoveGroup)
+
+ static LMoveGroup* New(TempAllocator& alloc) {
+ return new(alloc) LMoveGroup(alloc);
+ }
+
+ void printOperands(GenericPrinter& out);
+
+ // Add a move which takes place simultaneously with all others in the group.
+ bool add(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ // Add a move which takes place after existing moves in the group.
+ bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ size_t numMoves() const {
+ return moves_.length();
+ }
+ const LMove& getMove(size_t i) const {
+ return moves_[i];
+ }
+
+#ifdef JS_CODEGEN_X86
+ void setScratchRegister(Register reg) {
+ scratchRegister_ = LGeneralReg(reg);
+ }
+ LAllocation maybeScratchRegister() {
+ return scratchRegister_;
+ }
+#endif
+
+ bool uses(Register reg) {
+ for (size_t i = 0; i < numMoves(); i++) {
+ LMove move = getMove(i);
+ if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
+ return true;
+ }
+ return false;
+ }
+};
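+// The moves added with add() are resolved as a parallel move: all sources are
+// read before any destination is written. An illustrative sketch (the register
+// names are placeholders):
+//
+//   group->add(LGeneralReg(r0), LGeneralReg(r1), LDefinition::GENERAL);
+//   group->add(LGeneralReg(r1), LGeneralReg(r0), LDefinition::GENERAL);
+//   // Taken together these express a swap of r0 and r1 rather than a
+//   // sequential overwrite; addAfter() would instead order its move after
+//   // the existing ones.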
+
+
+// Constructs a SIMD object (value type) based on the MIRType of its input.
+class LSimdBox : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdBox)
+
+ explicit LSimdBox(const LAllocation& simd, const LDefinition& temp)
+ {
+ setOperand(0, simd);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdBox* mir() const {
+ return mir_->toSimdBox();
+ }
+};
+
+class LSimdUnbox : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdUnbox)
+
+ LSimdUnbox(const LAllocation& obj, const LDefinition& temp)
+ {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdUnbox* mir() const {
+ return mir_->toSimdUnbox();
+ }
+};
+
+// Constructs a SIMD value with 16 equal components (int8x16).
+class LSimdSplatX16 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX16)
+ explicit LSimdSplatX16(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Constructs a SIMD value with 8 equal components (int16x8).
+class LSimdSplatX8 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX8)
+ explicit LSimdSplatX8(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Constructs a SIMD value with 4 equal components (e.g. int32x4, float32x4).
+class LSimdSplatX4 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdSplatX4)
+ explicit LSimdSplatX4(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdSplat* mir() const {
+ return mir_->toSimdSplat();
+ }
+};
+
+// Reinterpret the bits of a SIMD value with a different type.
+class LSimdReinterpretCast : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdReinterpretCast)
+ explicit LSimdReinterpretCast(const LAllocation& v)
+ {
+ setOperand(0, v);
+ }
+
+ MSimdReinterpretCast* mir() const {
+ return mir_->toSimdReinterpretCast();
+ }
+};
+
+class LSimdExtractElementBase : public LInstructionHelper<1, 1, 0>
+{
+ protected:
+ explicit LSimdExtractElementBase(const LAllocation& base) {
+ setOperand(0, base);
+ }
+
+ public:
+ const LAllocation* getBase() {
+ return getOperand(0);
+ }
+ MSimdExtractElement* mir() const {
+ return mir_->toSimdExtractElement();
+ }
+};
+
+// Extracts an element from a given SIMD bool32x4 lane.
+class LSimdExtractElementB : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementB);
+ explicit LSimdExtractElementB(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from a given SIMD int32x4 lane.
+class LSimdExtractElementI : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementI);
+ explicit LSimdExtractElementI(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from a given SIMD float32x4 lane.
+class LSimdExtractElementF : public LSimdExtractElementBase
+{
+ public:
+ LIR_HEADER(SimdExtractElementF);
+ explicit LSimdExtractElementF(const LAllocation& base)
+ : LSimdExtractElementBase(base)
+ {}
+};
+
+// Extracts an element from an Uint32x4 SIMD vector, converts to double.
+class LSimdExtractElementU2D : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(SimdExtractElementU2D);
+ explicit LSimdExtractElementU2D(const LAllocation& base, const LDefinition& temp) {
+ setOperand(0, base);
+ setTemp(0, temp);
+ }
+ MSimdExtractElement* mir() const {
+ return mir_->toSimdExtractElement();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+
+class LSimdInsertElementBase : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LSimdInsertElementBase(const LAllocation& vec, const LAllocation& val)
+ {
+ setOperand(0, vec);
+ setOperand(1, val);
+ }
+
+ public:
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ unsigned lane() const {
+ return mir_->toSimdInsertElement()->lane();
+ }
+ unsigned length() const {
+ return SimdTypeToLength(mir_->toSimdInsertElement()->type());
+ }
+};
+
+// Replace the element at a given lane of a SIMD integer or boolean vector with
+// a given value. The value inserted into a boolean lane should be 0 or -1.
+class LSimdInsertElementI : public LSimdInsertElementBase
+{
+ public:
+ LIR_HEADER(SimdInsertElementI);
+ LSimdInsertElementI(const LAllocation& vec, const LAllocation& val)
+ : LSimdInsertElementBase(vec, val)
+ {}
+};
+
+// Replace the element at a given lane of a SIMD float32x4 vector with a given value.
+class LSimdInsertElementF : public LSimdInsertElementBase
+{
+ public:
+ LIR_HEADER(SimdInsertElementF);
+ LSimdInsertElementF(const LAllocation& vec, const LAllocation& val)
+ : LSimdInsertElementBase(vec, val)
+ {}
+};
+
+// Base class for both int32x4 and float32x4 swizzle instructions.
+class LSimdSwizzleBase : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ explicit LSimdSwizzleBase(const LAllocation& base)
+ {
+ setOperand(0, base);
+ }
+
+ const LAllocation* getBase() {
+ return getOperand(0);
+ }
+
+ unsigned numLanes() const { return mir_->toSimdSwizzle()->numLanes(); }
+ uint32_t lane(unsigned i) const { return mir_->toSimdSwizzle()->lane(i); }
+
+ bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+ return mir_->toSimdSwizzle()->lanesMatch(x, y, z, w);
+ }
+};
+
+// Swizzles an int32x4 vector into another int32x4 vector.
+class LSimdSwizzleI : public LSimdSwizzleBase
+{
+ public:
+ LIR_HEADER(SimdSwizzleI);
+ explicit LSimdSwizzleI(const LAllocation& base) : LSimdSwizzleBase(base)
+ {}
+};
+// Swizzles a float32x4 vector into another float32x4 vector.
+class LSimdSwizzleF : public LSimdSwizzleBase
+{
+ public:
+ LIR_HEADER(SimdSwizzleF);
+ explicit LSimdSwizzleF(const LAllocation& base) : LSimdSwizzleBase(base)
+ {}
+};
+
+class LSimdGeneralShuffleBase : public LVariadicInstruction<1, 1>
+{
+ public:
+ explicit LSimdGeneralShuffleBase(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+ const LAllocation* vector(unsigned i) {
+ MOZ_ASSERT(i < mir()->numVectors());
+ return getOperand(i);
+ }
+ const LAllocation* lane(unsigned i) {
+ MOZ_ASSERT(i < mir()->numLanes());
+ return getOperand(mir()->numVectors() + i);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdGeneralShuffle* mir() const {
+ return mir_->toSimdGeneralShuffle();
+ }
+};
+
+class LSimdGeneralShuffleI : public LSimdGeneralShuffleBase
+{
+ public:
+ LIR_HEADER(SimdGeneralShuffleI);
+ explicit LSimdGeneralShuffleI(const LDefinition& temp)
+ : LSimdGeneralShuffleBase(temp)
+ {}
+};
+
+class LSimdGeneralShuffleF : public LSimdGeneralShuffleBase
+{
+ public:
+ LIR_HEADER(SimdGeneralShuffleF);
+ explicit LSimdGeneralShuffleF(const LDefinition& temp)
+ : LSimdGeneralShuffleBase(temp)
+ {}
+};
+
+// Base class for both int32x4 and float32x4 shuffle instructions.
+class LSimdShuffleX4 : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShuffleX4);
+ LSimdShuffleX4()
+ {}
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ uint32_t lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+
+ bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
+ return mir_->toSimdShuffle()->lanesMatch(x, y, z, w);
+ }
+};
+
+// Remaining shuffles (8x16, 16x8).
+class LSimdShuffle : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShuffle);
+ LSimdShuffle()
+ {}
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ unsigned numLanes() const { return mir_->toSimdShuffle()->numLanes(); }
+ unsigned lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
+};
+
+// Binary SIMD comparison operation between two SIMD operands
+class LSimdBinaryComp : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LSimdBinaryComp() {}
+
+  public:
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ MSimdBinaryComp::Operation operation() const {
+ return mir_->toSimdBinaryComp()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryComp::OperationName(operation());
+ }
+};
+
+// Binary SIMD comparison operation between two Int8x16 operands.
+class LSimdBinaryCompIx16 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx16);
+ LSimdBinaryCompIx16() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Int16x8 operands.
+class LSimdBinaryCompIx8 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx8);
+ LSimdBinaryCompIx8() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Int32x4 operands.
+class LSimdBinaryCompIx4 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompIx4);
+ LSimdBinaryCompIx4() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD comparison operation between two Float32x4 operands
+class LSimdBinaryCompFx4 : public LSimdBinaryComp
+{
+ public:
+ LIR_HEADER(SimdBinaryCompFx4);
+ LSimdBinaryCompFx4() : LSimdBinaryComp() {}
+};
+
+// Binary SIMD arithmetic operation between two SIMD operands
+class LSimdBinaryArith : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LSimdBinaryArith() {}
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MSimdBinaryArith::Operation operation() const {
+ return this->mir_->toSimdBinaryArith()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryArith::OperationName(operation());
+ }
+};
+
+// Binary SIMD arithmetic operation between two Int8x16 operands
+class LSimdBinaryArithIx16 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx16);
+ LSimdBinaryArithIx16() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Int16x8 operands
+class LSimdBinaryArithIx8 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx8);
+ LSimdBinaryArithIx8() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Int32x4 operands
+class LSimdBinaryArithIx4 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithIx4);
+ LSimdBinaryArithIx4() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD arithmetic operation between two Float32x4 operands
+class LSimdBinaryArithFx4 : public LSimdBinaryArith
+{
+ public:
+ LIR_HEADER(SimdBinaryArithFx4);
+ LSimdBinaryArithFx4() : LSimdBinaryArith() {}
+};
+
+// Binary SIMD saturating arithmetic operation between two SIMD operands
+class LSimdBinarySaturating : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(SimdBinarySaturating);
+ LSimdBinarySaturating() {}
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+
+ MSimdBinarySaturating::Operation operation() const {
+ return this->mir_->toSimdBinarySaturating()->operation();
+ }
+ SimdSign signedness() const {
+ return this->mir_->toSimdBinarySaturating()->signedness();
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+ const char* extraName() const {
+ return MSimdBinarySaturating::OperationName(operation());
+ }
+};
+
+// Unary SIMD arithmetic operation on a SIMD operand
+class LSimdUnaryArith : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ explicit LSimdUnaryArith(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ MSimdUnaryArith::Operation operation() const {
+ return mir_->toSimdUnaryArith()->operation();
+ }
+};
+
+// Unary SIMD arithmetic operation on a Int8x16 operand
+class LSimdUnaryArithIx16 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx16);
+ explicit LSimdUnaryArithIx16(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Int16x8 operand
+class LSimdUnaryArithIx8 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx8);
+ explicit LSimdUnaryArithIx8(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Int32x4 operand
+class LSimdUnaryArithIx4 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithIx4);
+ explicit LSimdUnaryArithIx4(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Unary SIMD arithmetic operation on a Float32x4 operand
+class LSimdUnaryArithFx4 : public LSimdUnaryArith
+{
+ public:
+ LIR_HEADER(SimdUnaryArithFx4);
+ explicit LSimdUnaryArithFx4(const LAllocation& in) : LSimdUnaryArith(in) {}
+};
+
+// Binary SIMD bitwise operation between two 128-bit operands.
+class LSimdBinaryBitwise : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(SimdBinaryBitwise);
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return getOperand(1);
+ }
+ MSimdBinaryBitwise::Operation operation() const {
+ return mir_->toSimdBinaryBitwise()->operation();
+ }
+ const char* extraName() const {
+ return MSimdBinaryBitwise::OperationName(operation());
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+};
+
+// Shift a SIMD vector by a scalar amount.
+// The temp register is only required if the shift amount is a dynamic
+// value. If it is a constant, use a BogusTemp instead.
+class LSimdShift : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(SimdShift)
+ LSimdShift(const LAllocation& vec, const LAllocation& val, const LDefinition& temp) {
+ setOperand(0, vec);
+ setOperand(1, val);
+ setTemp(0, temp);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdShift::Operation operation() const {
+ return mir_->toSimdShift()->operation();
+ }
+ const char* extraName() const {
+ return MSimdShift::OperationName(operation());
+ }
+ MSimdShift* mir() const {
+ return mir_->toSimdShift();
+ }
+ MIRType type() const {
+ return mir_->type();
+ }
+};
+
+// SIMD selection of lanes from two int32x4 or float32x4 arguments based on an
+// int32x4 argument.
+class LSimdSelect : public LInstructionHelper<1, 3, 1>
+{
+ public:
+ LIR_HEADER(SimdSelect);
+ const LAllocation* mask() {
+ return getOperand(0);
+ }
+ const LAllocation* lhs() {
+ return getOperand(1);
+ }
+ const LAllocation* rhs() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MSimdSelect* mir() const {
+ return mir_->toSimdSelect();
+ }
+};
+
+class LSimdAnyTrue : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdAnyTrue)
+ explicit LSimdAnyTrue(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ MSimdAnyTrue* mir() const {
+ return mir_->toSimdAnyTrue();
+ }
+};
+
+class LSimdAllTrue : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SimdAllTrue)
+ explicit LSimdAllTrue(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ const LAllocation* vector() {
+ return getOperand(0);
+ }
+ MSimdAllTrue* mir() const {
+ return mir_->toSimdAllTrue();
+ }
+};
+
+
+// Constant 32-bit integer.
+class LInteger : public LInstructionHelper<1, 0, 0>
+{
+ int32_t i32_;
+
+ public:
+ LIR_HEADER(Integer)
+
+ explicit LInteger(int32_t i32)
+ : i32_(i32)
+ { }
+
+ int32_t getValue() const {
+ return i32_;
+ }
+};
+
+// Constant 64-bit integer.
+class LInteger64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ int64_t i64_;
+
+ public:
+ LIR_HEADER(Integer64)
+
+ explicit LInteger64(int64_t i64)
+ : i64_(i64)
+ { }
+
+ int64_t getValue() const {
+ return i64_;
+ }
+};
+
+// Constant pointer.
+class LPointer : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ enum Kind {
+ GC_THING,
+ NON_GC_THING
+ };
+
+ private:
+ void* ptr_;
+ Kind kind_;
+
+ public:
+ LIR_HEADER(Pointer)
+
+ explicit LPointer(gc::Cell* ptr)
+ : ptr_(ptr), kind_(GC_THING)
+ { }
+
+ LPointer(void* ptr, Kind kind)
+ : ptr_(ptr), kind_(kind)
+ { }
+
+ void* ptr() const {
+ return ptr_;
+ }
+ Kind kind() const {
+ return kind_;
+ }
+ const char* extraName() const {
+ return kind_ == GC_THING ? "GC_THING" : "NON_GC_THING";
+ }
+
+ gc::Cell* gcptr() const {
+ MOZ_ASSERT(kind() == GC_THING);
+ return (gc::Cell*) ptr_;
+ }
+};
+
+// Constant double.
+class LDouble : public LInstructionHelper<1, 0, 0>
+{
+ wasm::RawF64 d_;
+ public:
+ LIR_HEADER(Double);
+
+ explicit LDouble(wasm::RawF64 d) : d_(d)
+ { }
+
+ wasm::RawF64 getDouble() const {
+ return d_;
+ }
+};
+
+// Constant float32.
+class LFloat32 : public LInstructionHelper<1, 0, 0>
+{
+ wasm::RawF32 f_;
+ public:
+ LIR_HEADER(Float32);
+
+ explicit LFloat32(wasm::RawF32 f)
+ : f_(f)
+ { }
+
+ wasm::RawF32 getFloat() const {
+ return f_;
+ }
+};
+
+// Constant 128-bit SIMD integer vector (8x16, 16x8, 32x4).
+// Also used for Bool32x4, Bool16x8, etc.
+class LSimd128Int : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Simd128Int);
+
+ explicit LSimd128Int() {}
+ const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+};
+
+// Constant 128-bit SIMD floating point vector (32x4, 64x2).
+class LSimd128Float : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Simd128Float);
+
+ explicit LSimd128Float() {}
+ const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
+};
+
+// A constant Value.
+class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ Value v_;
+
+ public:
+ LIR_HEADER(Value)
+
+ explicit LValue(const Value& v)
+ : v_(v)
+ { }
+
+ Value value() const {
+ return v_;
+ }
+};
+
+// Clone an object literal so that we do not modify the object contained in
+// the sources.
+class LCloneLiteral : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CloneLiteral)
+
+ explicit LCloneLiteral(const LAllocation& obj)
+ {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* getObjectLiteral() {
+ return getOperand(0);
+ }
+
+ MCloneLiteral* mir() const {
+ return mir_->toCloneLiteral();
+ }
+};
+
+// Formal argument for a function, returning a box. Formal arguments are
+// initially read from the stack.
+class LParameter : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(Parameter)
+};
+
+// Stack offset for a word-sized immutable input value to a frame.
+class LCallee : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Callee)
+};
+
+class LIsConstructing : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(IsConstructing)
+};
+
+// Base class for control instructions (goto, branch, etc.)
+template <size_t Succs, size_t Operands, size_t Temps>
+class LControlInstructionHelper : public LInstructionHelper<0, Operands, Temps> {
+
+ mozilla::Array<MBasicBlock*, Succs> successors_;
+
+ public:
+ virtual size_t numSuccessors() const final override { return Succs; }
+
+ virtual MBasicBlock* getSuccessor(size_t i) const final override {
+ return successors_[i];
+ }
+
+ virtual void setSuccessor(size_t i, MBasicBlock* successor) final override {
+ successors_[i] = successor;
+ }
+};
+
+// Jumps to the start of a basic block.
+class LGoto : public LControlInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(Goto)
+
+ explicit LGoto(MBasicBlock* block)
+ {
+ setSuccessor(0, block);
+ }
+
+ MBasicBlock* target() const {
+ return getSuccessor(0);
+ }
+};
+
+class LNewArray : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewArray)
+
+ explicit LNewArray(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArray* mir() const {
+ return mir_->toNewArray();
+ }
+};
+
+class LNewArrayCopyOnWrite : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewArrayCopyOnWrite)
+
+ explicit LNewArrayCopyOnWrite(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArrayCopyOnWrite* mir() const {
+ return mir_->toNewArrayCopyOnWrite();
+ }
+};
+
+class LNewArrayDynamicLength : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewArrayDynamicLength)
+
+ explicit LNewArrayDynamicLength(const LAllocation& length, const LDefinition& temp) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewArrayDynamicLength* mir() const {
+ return mir_->toNewArrayDynamicLength();
+ }
+};
+
+class LNewTypedArray : public LInstructionHelper<1, 0, 2>
+{
+ public:
+ LIR_HEADER(NewTypedArray)
+
+ explicit LNewTypedArray(const LDefinition& temp1, const LDefinition& temp2) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ MNewTypedArray* mir() const {
+ return mir_->toNewTypedArray();
+ }
+};
+
+class LNewTypedArrayDynamicLength : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewTypedArrayDynamicLength)
+
+ explicit LNewTypedArrayDynamicLength(const LAllocation& length, const LDefinition& temp) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewTypedArrayDynamicLength* mir() const {
+ return mir_->toNewTypedArrayDynamicLength();
+ }
+};
+
+class LNewObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewObject)
+
+ explicit LNewObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewObject* mir() const {
+ return mir_->toNewObject();
+ }
+};
+
+class LNewTypedObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewTypedObject)
+
+ explicit LNewTypedObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewTypedObject* mir() const {
+ return mir_->toNewTypedObject();
+ }
+};
+
+// Allocates a new NamedLambdaObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) An inline allocation of the call object is attempted.
+// (2) Otherwise, a callVM creates a new object.
+//
+class LNewNamedLambdaObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewNamedLambdaObject);
+
+ explicit LNewNamedLambdaObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewNamedLambdaObject* mir() const {
+ return mir_->toNewNamedLambdaObject();
+ }
+};
+
+// Allocates a new CallObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) If the call object is extensible, this is a callVM to create the
+// call object.
+// (2) Otherwise, an inline allocation of the call object is attempted.
+//
+class LNewCallObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewCallObject)
+
+ explicit LNewCallObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+
+ MNewCallObject* mir() const {
+ return mir_->toNewCallObject();
+ }
+};
+
+// Performs a callVM to allocate a new CallObject with singleton type.
+class LNewSingletonCallObject : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(NewSingletonCallObject)
+
+ explicit LNewSingletonCallObject(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MNewSingletonCallObject* mir() const {
+ return mir_->toNewSingletonCallObject();
+ }
+};
+
+class LNewDerivedTypedObject : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(NewDerivedTypedObject);
+
+ LNewDerivedTypedObject(const LAllocation& type,
+ const LAllocation& owner,
+ const LAllocation& offset) {
+ setOperand(0, type);
+ setOperand(1, owner);
+ setOperand(2, offset);
+ }
+
+ const LAllocation* type() {
+ return getOperand(0);
+ }
+
+ const LAllocation* owner() {
+ return getOperand(1);
+ }
+
+ const LAllocation* offset() {
+ return getOperand(2);
+ }
+};
+
+class LNewStringObject : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NewStringObject)
+
+ LNewStringObject(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MNewStringObject* mir() const {
+ return mir_->toNewStringObject();
+ }
+};
+
+class LInitElem : public LCallInstructionHelper<0, 1 + 2*BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitElem)
+
+ LInitElem(const LAllocation& object, const LBoxAllocation& id, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(IdIndex, id);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t IdIndex = 1;
+ static const size_t ValueIndex = 1 + BOX_PIECES;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ MInitElem* mir() const {
+ return mir_->toInitElem();
+ }
+};
+
+class LInitElemGetterSetter : public LCallInstructionHelper<0, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitElemGetterSetter)
+
+ LInitElemGetterSetter(const LAllocation& object, const LBoxAllocation& id,
+ const LAllocation& value) {
+ setOperand(0, object);
+ setOperand(1, value);
+ setBoxOperand(IdIndex, id);
+ }
+
+ static const size_t IdIndex = 2;
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ MInitElemGetterSetter* mir() const {
+ return mir_->toInitElemGetterSetter();
+ }
+};
+
+// Takes in an Object and a Value.
+class LMutateProto : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(MutateProto)
+
+ LMutateProto(const LAllocation& object, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 1;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getValue() {
+ return getOperand(1);
+ }
+};
+
+// Takes in an Object and a Value.
+class LInitProp : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InitProp)
+
+ LInitProp(const LAllocation& object, const LBoxAllocation& value) {
+ setOperand(0, object);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 1;
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getValue() {
+ return getOperand(1);
+ }
+
+ MInitProp* mir() const {
+ return mir_->toInitProp();
+ }
+};
+
+class LInitPropGetterSetter : public LCallInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(InitPropGetterSetter)
+
+ LInitPropGetterSetter(const LAllocation& object, const LAllocation& value) {
+ setOperand(0, object);
+ setOperand(1, value);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ MInitPropGetterSetter* mir() const {
+ return mir_->toInitPropGetterSetter();
+ }
+};
+
+class LCheckOverRecursed : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(CheckOverRecursed)
+
+ LCheckOverRecursed()
+ { }
+
+ MCheckOverRecursed* mir() const {
+ return mir_->toCheckOverRecursed();
+ }
+};
+
+class LWasmTrap : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmTrap);
+
+ LWasmTrap()
+ { }
+
+ const MWasmTrap* mir() const {
+ return mir_->toWasmTrap();
+ }
+};
+
+template<size_t Defs, size_t Ops>
+class LWasmReinterpretBase : public LInstructionHelper<Defs, Ops, 0>
+{
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ public:
+ const LAllocation* input() {
+ return Base::getOperand(0);
+ }
+ MWasmReinterpret* mir() const {
+ return Base::mir_->toWasmReinterpret();
+ }
+};
+
+class LWasmReinterpret : public LWasmReinterpretBase<1, 1>
+{
+ public:
+ LIR_HEADER(WasmReinterpret);
+ explicit LWasmReinterpret(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LWasmReinterpretFromI64 : public LWasmReinterpretBase<1, INT64_PIECES>
+{
+ public:
+ LIR_HEADER(WasmReinterpretFromI64);
+ explicit LWasmReinterpretFromI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+class LWasmReinterpretToI64 : public LWasmReinterpretBase<INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(WasmReinterpretToI64);
+ explicit LWasmReinterpretToI64(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+namespace details {
+ template<size_t Defs, size_t Ops, size_t Temps>
+ class RotateBase : public LInstructionHelper<Defs, Ops, Temps>
+ {
+ typedef LInstructionHelper<Defs, Ops, Temps> Base;
+ public:
+ MRotate* mir() {
+ return Base::mir_->toRotate();
+ }
+ };
+} // details
+
+class LRotate : public details::RotateBase<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Rotate);
+
+ const LAllocation* input() { return getOperand(0); }
+ LAllocation* count() { return getOperand(1); }
+};
+
+class LRotateI64 : public details::RotateBase<INT64_PIECES, INT64_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(RotateI64);
+
+ LRotateI64()
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ static const size_t Input = 0;
+ static const size_t Count = INT64_PIECES;
+
+ const LInt64Allocation input() { return getInt64Operand(Input); }
+ const LDefinition* temp() { return getTemp(0); }
+ LAllocation* count() { return getOperand(Count); }
+};
+
+class LInterruptCheck : public LInstructionHelper<0, 0, 0>
+{
+ Label* oolEntry_;
+
+ // Whether this is an implicit interrupt check. Implicit interrupt checks
+ // use a patchable backedge and signal handlers instead of an explicit
+ // rt->interrupt check.
+ bool implicit_;
+
+ public:
+ LIR_HEADER(InterruptCheck)
+
+ LInterruptCheck()
+ : oolEntry_(nullptr),
+ implicit_(false)
+ {}
+
+ Label* oolEntry() {
+ MOZ_ASSERT(implicit_);
+ return oolEntry_;
+ }
+
+ void setOolEntry(Label* oolEntry) {
+ MOZ_ASSERT(implicit_);
+ oolEntry_ = oolEntry;
+ }
+ MInterruptCheck* mir() const {
+ return mir_->toInterruptCheck();
+ }
+
+ void setImplicit() {
+ implicit_ = true;
+ }
+ bool implicit() const {
+ return implicit_;
+ }
+};
+
+class LDefVar : public LCallInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(DefVar)
+
+ explicit LDefVar(const LAllocation& envChain)
+ {
+ setOperand(0, envChain);
+ }
+
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ MDefVar* mir() const {
+ return mir_->toDefVar();
+ }
+};
+
+class LDefLexical : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(DefLexical)
+
+ MDefLexical* mir() const {
+ return mir_->toDefLexical();
+ }
+};
+
+class LDefFun : public LCallInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(DefFun)
+
+ LDefFun(const LAllocation& fun, const LAllocation& envChain)
+ {
+ setOperand(0, fun);
+ setOperand(1, envChain);
+ }
+
+ const LAllocation* fun() {
+ return getOperand(0);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(1);
+ }
+ MDefFun* mir() const {
+ return mir_->toDefFun();
+ }
+};
+
+class LTypeOfV : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(TypeOfV)
+
+ LTypeOfV(const LBoxAllocation& input, const LDefinition& tempToUnbox) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+
+ MTypeOf* mir() const {
+ return mir_->toTypeOf();
+ }
+};
+
+class LToAsync : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ToAsync)
+ explicit LToAsync(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* unwrapped() {
+ return getOperand(0);
+ }
+};
+
+class LToIdV : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ToIdV)
+
+ LToIdV(const LBoxAllocation& input, const LDefinition& temp)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ MToId* mir() const {
+ return mir_->toToId();
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+};
+
+// Allocate an object for |new| on the caller-side,
+// when there is no known templateObject or prototype.
+class LCreateThis : public LCallInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(CreateThis)
+
+ LCreateThis(const LAllocation& callee, const LAllocation& newTarget)
+ {
+ setOperand(0, callee);
+ setOperand(1, newTarget);
+ }
+
+ const LAllocation* getCallee() {
+ return getOperand(0);
+ }
+ const LAllocation* getNewTarget() {
+ return getOperand(1);
+ }
+
+ MCreateThis* mir() const {
+ return mir_->toCreateThis();
+ }
+};
+
+// Allocate an object for |new| on the caller-side,
+// when the prototype is known.
+class LCreateThisWithProto : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(CreateThisWithProto)
+
+ LCreateThisWithProto(const LAllocation& callee, const LAllocation& newTarget,
+ const LAllocation& prototype)
+ {
+ setOperand(0, callee);
+ setOperand(1, newTarget);
+ setOperand(2, prototype);
+ }
+
+ const LAllocation* getCallee() {
+ return getOperand(0);
+ }
+ const LAllocation* getNewTarget() {
+ return getOperand(1);
+ }
+ const LAllocation* getPrototype() {
+ return getOperand(2);
+ }
+
+ MCreateThis* mir() const {
+ return mir_->toCreateThis();
+ }
+};
+
+// Allocate an object for |new| on the caller-side.
+// Always performs object initialization with a fast path.
+class LCreateThisWithTemplate : public LInstructionHelper<1, 0, 1>
+{
+ public:
+ LIR_HEADER(CreateThisWithTemplate)
+
+ explicit LCreateThisWithTemplate(const LDefinition& temp) {
+ setTemp(0, temp);
+ }
+
+ MCreateThisWithTemplate* mir() const {
+ return mir_->toCreateThisWithTemplate();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Allocate a new arguments object for the frame.
+class LCreateArgumentsObject : public LCallInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(CreateArgumentsObject)
+
+ LCreateArgumentsObject(const LAllocation& callObj, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ {
+ setOperand(0, callObj);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ const LAllocation* getCallObject() {
+ return getOperand(0);
+ }
+
+ MCreateArgumentsObject* mir() const {
+ return mir_->toCreateArgumentsObject();
+ }
+};
+
+// Get argument from arguments object.
+class LGetArgumentsObjectArg : public LInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(GetArgumentsObjectArg)
+
+ LGetArgumentsObjectArg(const LAllocation& argsObj, const LDefinition& temp)
+ {
+ setOperand(0, argsObj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() {
+ return getOperand(0);
+ }
+
+ MGetArgumentsObjectArg* mir() const {
+ return mir_->toGetArgumentsObjectArg();
+ }
+};
+
+// Set argument on arguments object.
+class LSetArgumentsObjectArg : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(SetArgumentsObjectArg)
+
+ LSetArgumentsObjectArg(const LAllocation& argsObj, const LBoxAllocation& value,
+ const LDefinition& temp)
+ {
+ setOperand(0, argsObj);
+ setBoxOperand(ValueIndex, value);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() {
+ return getOperand(0);
+ }
+
+ MSetArgumentsObjectArg* mir() const {
+ return mir_->toSetArgumentsObjectArg();
+ }
+
+ static const size_t ValueIndex = 1;
+};
+
+// If the Value is an Object, return unbox(Value).
+// Otherwise, return the other Object.
+class LReturnFromCtor : public LInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(ReturnFromCtor)
+
+ LReturnFromCtor(const LBoxAllocation& value, const LAllocation& object)
+ {
+ setBoxOperand(ValueIndex, value);
+ setOperand(ObjectIndex, object);
+ }
+
+ const LAllocation* getObject() {
+ return getOperand(ObjectIndex);
+ }
+
+ static const size_t ValueIndex = 0;
+ static const size_t ObjectIndex = BOX_PIECES;
+};
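+
+// For example, at the JS level this encodes the |new| return-value rule:
+//
+//   function C() { this.x = 1; return 42; }  // primitive return is ignored,
+//                                            // so |new C()| is the fresh {x: 1}
+//   function D() { return { y: 2 }; }        // object return wins,
+//                                            // so |new D()| is {y: 2}
+//
+// The instruction selects unbox(Value) when the returned Value is an object
+// and otherwise falls back to the |object| operand (the allocated |this|).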
+
+class LComputeThis : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ComputeThis)
+
+ static const size_t ValueIndex = 0;
+
+ explicit LComputeThis(const LBoxAllocation& value) {
+ setBoxOperand(ValueIndex, value);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+
+ MComputeThis* mir() const {
+ return mir_->toComputeThis();
+ }
+};
+
+// Writes a typed argument for a function call to the frame's argument vector.
+class LStackArgT : public LInstructionHelper<0, 1, 0>
+{
+ uint32_t argslot_; // Index into frame-scope argument vector.
+ MIRType type_;
+
+ public:
+ LIR_HEADER(StackArgT)
+
+ LStackArgT(uint32_t argslot, MIRType type, const LAllocation& arg)
+ : argslot_(argslot),
+ type_(type)
+ {
+ setOperand(0, arg);
+ }
+ uint32_t argslot() const {
+ return argslot_;
+ }
+ MIRType type() const {
+ return type_;
+ }
+ const LAllocation* getArgument() {
+ return getOperand(0);
+ }
+};
+
+// Writes an untyped argument for a function call to the frame's argument vector.
+class LStackArgV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ uint32_t argslot_; // Index into frame-scope argument vector.
+
+ public:
+ LIR_HEADER(StackArgV)
+
+ LStackArgV(uint32_t argslot, const LBoxAllocation& value)
+ : argslot_(argslot)
+ {
+ setBoxOperand(0, value);
+ }
+
+ uint32_t argslot() const {
+ return argslot_;
+ }
+};
+
+// Common code for LIR descended from MCall.
+template <size_t Defs, size_t Operands, size_t Temps>
+class LJSCallInstructionHelper : public LCallInstructionHelper<Defs, Operands, Temps>
+{
+ public:
+ uint32_t argslot() const {
+ if (JitStackValueAlignment > 1)
+ return AlignBytes(mir()->numStackArgs(), JitStackValueAlignment);
+ return mir()->numStackArgs();
+ }
+ MCall* mir() const {
+ return this->mir_->toCall();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const {
+ return mir()->numActualArgs();
+ }
+
+ bool isConstructing() const {
+ return mir()->isConstructing();
+ }
+};
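+
+// Worked example for argslot(): when JitStackValueAlignment is 2, a call that
+// pushes 5 stack Values reserves AlignBytes(5, 2) == 6 slots so the end of the
+// argument vector stays aligned to a Value pair; with an alignment of 1 the
+// raw numStackArgs() count is used directly.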
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LCallGeneric : public LJSCallInstructionHelper<BOX_PIECES, 1, 2>
+{
+ public:
+ LIR_HEADER(CallGeneric)
+
+ LCallGeneric(const LAllocation& func, const LDefinition& nargsreg,
+ const LDefinition& tmpobjreg)
+ {
+ setOperand(0, func);
+ setTemp(0, nargsreg);
+ setTemp(1, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LDefinition* getNargsReg() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempObject() {
+ return getTemp(1);
+ }
+};
+
+// Generates a hardcoded callsite for a known, non-native target.
+class LCallKnown : public LJSCallInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(CallKnown)
+
+ LCallKnown(const LAllocation& func, const LDefinition& tmpobjreg)
+ {
+ setOperand(0, func);
+ setTemp(0, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+};
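+
+// Rough sketch of how lowering picks between the call nodes in this family
+// (not the actual implementation, which lives in Lowering.cpp; assumes the
+// usual LIRGeneratorShared helpers such as alloc(), defineReturn and
+// assignSafepoint, and placeholder operand/temp names):
+//
+//   if (WrappedFunction* target = call->getSingleTarget()) {
+//       if (target->isNative())
+//           lir = new(alloc()) LCallNative(cx, argc, vp, tmp);   // see below
+//       else
+//           lir = new(alloc()) LCallKnown(func, tmp);            // direct jump
+//   } else {
+//       lir = new(alloc()) LCallGeneric(func, nargs, tmp);       // runtime dispatch
+//   }
+//   defineReturn(lir, call);
+//   assignSafepoint(lir, call);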
+
+// Generates a hardcoded callsite for a known, native target.
+class LCallNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4>
+{
+ public:
+ LIR_HEADER(CallNative)
+
+ LCallNative(const LDefinition& argContext, const LDefinition& argUintN,
+ const LDefinition& argVp, const LDefinition& tmpreg)
+ {
+ // Registers used for callWithABI().
+ setTemp(0, argContext);
+ setTemp(1, argUintN);
+ setTemp(2, argVp);
+
+ // Temporary registers.
+ setTemp(3, tmpreg);
+ }
+
+ const LDefinition* getArgContextReg() {
+ return getTemp(0);
+ }
+ const LDefinition* getArgUintNReg() {
+ return getTemp(1);
+ }
+ const LDefinition* getArgVpReg() {
+ return getTemp(2);
+ }
+ const LDefinition* getTempReg() {
+ return getTemp(3);
+ }
+};
+
+// Generates a hardcoded callsite for a known, DOM-native target.
+class LCallDOMNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4>
+{
+ public:
+ LIR_HEADER(CallDOMNative)
+
+ LCallDOMNative(const LDefinition& argJSContext, const LDefinition& argObj,
+ const LDefinition& argPrivate, const LDefinition& argArgs)
+ {
+ setTemp(0, argJSContext);
+ setTemp(1, argObj);
+ setTemp(2, argPrivate);
+ setTemp(3, argArgs);
+ }
+
+ const LDefinition* getArgJSContext() {
+ return getTemp(0);
+ }
+ const LDefinition* getArgObj() {
+ return getTemp(1);
+ }
+ const LDefinition* getArgPrivate() {
+ return getTemp(2);
+ }
+ const LDefinition* getArgArgs() {
+ return getTemp(3);
+ }
+};
+
+class LBail : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Bail)
+};
+
+class LUnreachable : public LControlInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Unreachable)
+};
+
+class LEncodeSnapshot : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(EncodeSnapshot)
+};
+
+template <size_t defs, size_t ops>
+class LDOMPropertyInstructionHelper : public LCallInstructionHelper<defs, 1 + ops, 3>
+{
+ protected:
+ LDOMPropertyInstructionHelper(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ {
+ this->setOperand(0, ObjectReg);
+ this->setTemp(0, JSContextReg);
+ this->setTemp(1, PrivReg);
+ this->setTemp(2, ValueReg);
+ }
+
+ public:
+ const LDefinition* getJSContextReg() {
+ return this->getTemp(0);
+ }
+ const LAllocation* getObjectReg() {
+ return this->getOperand(0);
+ }
+ const LDefinition* getPrivReg() {
+ return this->getTemp(1);
+ }
+ const LDefinition* getValueReg() {
+ return this->getTemp(2);
+ }
+};
+
+
+class LGetDOMProperty : public LDOMPropertyInstructionHelper<BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetDOMProperty)
+
+ LGetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<BOX_PIECES, 0>(JSContextReg, ObjectReg,
+ PrivReg, ValueReg)
+ { }
+
+ MGetDOMProperty* mir() const {
+ return mir_->toGetDOMProperty();
+ }
+};
+
+class LGetDOMMemberV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetDOMMemberV);
+ explicit LGetDOMMemberV(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ MGetDOMMember* mir() const {
+ return mir_->toGetDOMMember();
+ }
+};
+
+class LGetDOMMemberT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(GetDOMMemberT);
+ explicit LGetDOMMemberT(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ MGetDOMMember* mir() const {
+ return mir_->toGetDOMMember();
+ }
+};
+
+class LSetDOMProperty : public LDOMPropertyInstructionHelper<0, BOX_PIECES>
+{
+ public:
+ LIR_HEADER(SetDOMProperty)
+
+ LSetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LBoxAllocation& value, const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<0, BOX_PIECES>(JSContextReg, ObjectReg,
+ PrivReg, ValueReg)
+ {
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ MSetDOMProperty* mir() const {
+ return mir_->toSetDOMProperty();
+ }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LApplyArgsGeneric : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2>
+{
+ public:
+ LIR_HEADER(ApplyArgsGeneric)
+
+ LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArgs* mir() const {
+ return mir_->toApplyArgs();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LAllocation* getArgc() {
+ return getOperand(1);
+ }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempStackCounter() {
+ return getTemp(1);
+ }
+};
+
+class LApplyArrayGeneric : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2>
+{
+ public:
+ LIR_HEADER(ApplyArrayGeneric)
+
+ LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArray* mir() const {
+ return mir_->toApplyArray();
+ }
+
+ bool hasSingleTarget() const {
+ return getSingleTarget() != nullptr;
+ }
+ WrappedFunction* getSingleTarget() const {
+ return mir()->getSingleTarget();
+ }
+
+ const LAllocation* getFunction() {
+ return getOperand(0);
+ }
+ const LAllocation* getElements() {
+ return getOperand(1);
+ }
+    // argc is mapped to the same register as elements: argc becomes live as
+    // elements is dying; all registers are call temps.
+ const LAllocation* getArgc() {
+ return getOperand(1);
+ }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() {
+ return getTemp(0);
+ }
+ const LDefinition* getTempStackCounter() {
+ return getTemp(1);
+ }
+};
+
+class LArraySplice : public LCallInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(ArraySplice)
+
+ LArraySplice(const LAllocation& object, const LAllocation& start,
+ const LAllocation& deleteCount)
+ {
+ setOperand(0, object);
+ setOperand(1, start);
+ setOperand(2, deleteCount);
+ }
+
+ MArraySplice* mir() const {
+ return mir_->toArraySplice();
+ }
+
+ const LAllocation* getObject() {
+ return getOperand(0);
+ }
+ const LAllocation* getStart() {
+ return getOperand(1);
+ }
+ const LAllocation* getDeleteCount() {
+ return getOperand(2);
+ }
+};
+
+class LGetDynamicName : public LCallInstructionHelper<BOX_PIECES, 2, 3>
+{
+ public:
+ LIR_HEADER(GetDynamicName)
+
+ LGetDynamicName(const LAllocation& envChain, const LAllocation& name,
+ const LDefinition& temp1, const LDefinition& temp2, const LDefinition& temp3)
+ {
+ setOperand(0, envChain);
+ setOperand(1, name);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ MGetDynamicName* mir() const {
+ return mir_->toGetDynamicName();
+ }
+
+ const LAllocation* getEnvironmentChain() {
+ return getOperand(0);
+ }
+ const LAllocation* getName() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+};
+
+class LCallDirectEval : public LCallInstructionHelper<BOX_PIECES, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDirectEval)
+
+ LCallDirectEval(const LAllocation& envChain, const LAllocation& string,
+ const LBoxAllocation& newTarget)
+ {
+ setOperand(0, envChain);
+ setOperand(1, string);
+ setBoxOperand(NewTarget, newTarget);
+ }
+
+ static const size_t NewTarget = 2;
+
+ MCallDirectEval* mir() const {
+ return mir_->toCallDirectEval();
+ }
+
+ const LAllocation* getEnvironmentChain() {
+ return getOperand(0);
+ }
+ const LAllocation* getString() {
+ return getOperand(1);
+ }
+};
+
+// Takes in either an integer or boolean input and tests it for truthiness.
+class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestIAndBranch)
+
+ LTestIAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
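+
+// Rough sketch of how an MTest is matched to one of the LTest*AndBranch
+// variants below, dispatching on the type of the tested operand (not the
+// actual implementation; see LIRGenerator::visitTest in Lowering.cpp):
+//
+//   MDefinition* opd = test->getOperand(0);
+//   if (opd->type() == MIRType::Int32 || opd->type() == MIRType::Boolean)
+//       add(new(alloc()) LTestIAndBranch(useRegister(opd),
+//                                        test->ifTrue(), test->ifFalse()), test);
+//   // Double -> LTestDAndBranch, Float32 -> LTestFAndBranch,
+//   // Object -> LTestOAndBranch, Value -> LTestVAndBranch.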
+
+// Takes in an int64 input and tests it for truthiness.
+class LTestI64AndBranch : public LControlInstructionHelper<2, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(TestI64AndBranch)
+
+ LTestI64AndBranch(const LInt64Allocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setInt64Operand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes in a double input and tests it for truthiness.
+class LTestDAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestDAndBranch)
+
+ LTestDAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes in a float32 input and tests it for truthiness.
+class LTestFAndBranch : public LControlInstructionHelper<2, 1, 0>
+{
+ public:
+ LIR_HEADER(TestFAndBranch)
+
+ LTestFAndBranch(const LAllocation& in, MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+// Takes an object and tests it for truthiness. An object is falsy iff it
+// emulates |undefined|; see js::EmulatesUndefined.
+class LTestOAndBranch : public LControlInstructionHelper<2, 1, 1>
+{
+ public:
+ LIR_HEADER(TestOAndBranch)
+
+ LTestOAndBranch(const LAllocation& input, MBasicBlock* ifTruthy, MBasicBlock* ifFalsy,
+ const LDefinition& temp)
+ {
+ setOperand(0, input);
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MBasicBlock* ifTruthy() {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalsy() {
+ return getSuccessor(1);
+ }
+
+ MTest* mir() {
+ return mir_->toTest();
+ }
+};
+
+// Takes in a boxed value and tests it for truthiness.
+class LTestVAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TestVAndBranch)
+
+ LTestVAndBranch(MBasicBlock* ifTruthy, MBasicBlock* ifFalsy, const LBoxAllocation& input,
+ const LDefinition& temp0, const LDefinition& temp1, const LDefinition& temp2)
+ {
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const char* extraName() const {
+ return mir()->operandMightEmulateUndefined() ? "MightEmulateUndefined" : nullptr;
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ MBasicBlock* ifTruthy() {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalsy() {
+ return getSuccessor(1);
+ }
+
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+};
+
+// Dispatches control flow to a successor based on incoming JSFunction*.
+// Used to implement polymorphic inlining.
+class LFunctionDispatch : public LInstructionHelper<0, 1, 0>
+{
+ // Dispatch is performed based on a function -> block map
+ // stored in the MIR.
+
+ public:
+ LIR_HEADER(FunctionDispatch);
+
+ explicit LFunctionDispatch(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MFunctionDispatch* mir() const {
+ return mir_->toFunctionDispatch();
+ }
+};
+
+class LObjectGroupDispatch : public LInstructionHelper<0, 1, 1>
+{
+ // Dispatch is performed based on an ObjectGroup -> block
+ // map inferred by the MIR.
+
+ public:
+ LIR_HEADER(ObjectGroupDispatch);
+
+ const char* extraName() const {
+ return mir()->hasFallback() ? "HasFallback" : "NoFallback";
+ }
+
+ LObjectGroupDispatch(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MObjectGroupDispatch* mir() const {
+ return mir_->toObjectGroupDispatch();
+ }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompare : public LInstructionHelper<1, 2, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(Compare)
+ LCompare(JSOp jsop, const LAllocation& left, const LAllocation& right)
+ : jsop_(jsop)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LCompareI64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64(JSOp jsop, const LInt64Allocation& left, const LInt64Allocation& right)
+ : jsop_(jsop)
+ {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LCompareI64AndBranch : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0>
+{
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64AndBranch)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64AndBranch(MCompare* cmpMir, JSOp jsop,
+ const LInt64Allocation& left, const LInt64Allocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir), jsop_(jsop)
+ {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompareAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareAndBranch)
+ LCompareAndBranch(MCompare* cmpMir, JSOp jsop,
+ const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir), jsop_(jsop)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
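+
+// Note on the *AndBranch forms: when an MCompare's only consumer is an MTest,
+// the two can be fused into a single compare-and-jump. The fused node then
+// carries two MIR pointers: mir_ refers to the MTest (for the successors)
+// while cmpMir_ keeps the MCompare that describes the comparison itself.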
+
+class LCompareD : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareD)
+ LCompareD(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareF : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareF)
+ LCompareF(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareDAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareDAndBranch)
+ LCompareDAndBranch(MCompare* cmpMir, const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareFAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareFAndBranch)
+ LCompareFAndBranch(MCompare* cmpMir, const LAllocation& left, const LAllocation& right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareS : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CompareS)
+ LCompareS(const LAllocation& left, const LAllocation& right) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+// Strict-equality comparison between a value and a string.
+class LCompareStrictS : public LInstructionHelper<1, BOX_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(CompareStrictS)
+ LCompareStrictS(const LBoxAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ setTemp(0, temp);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* right() {
+ return getOperand(BOX_PIECES);
+ }
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+// Used for strict-equality comparisons where one side is a boolean
+// and the other is a value. Note that CompareI is used to compare
+// two booleans.
+class LCompareB : public LInstructionHelper<1, BOX_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(CompareB)
+
+ LCompareB(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* rhs() {
+ return getOperand(BOX_PIECES);
+ }
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareBAndBranch : public LControlInstructionHelper<2, BOX_PIECES + 1, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareBAndBranch)
+
+ LCompareBAndBranch(MCompare* cmpMir, const LBoxAllocation& lhs, const LAllocation& rhs,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : cmpMir_(cmpMir)
+ {
+ setBoxOperand(Lhs, lhs);
+ setOperand(BOX_PIECES, rhs);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ static const size_t Lhs = 0;
+
+ const LAllocation* rhs() {
+ return getOperand(BOX_PIECES);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareBitwise : public LInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CompareBitwise)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareBitwise(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCompare* mir() const {
+ return mir_->toCompare();
+ }
+};
+
+class LCompareBitwiseAndBranch : public LControlInstructionHelper<2, 2 * BOX_PIECES, 0>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareBitwiseAndBranch)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareBitwiseAndBranch(MCompare* cmpMir, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : cmpMir_(cmpMir)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+};
+
+class LCompareVM : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CompareVM)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCompareVM(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCompare* mir() const {
+ return mir_->toCompare();
+ }
+};
+
+class LBitAndAndBranch : public LControlInstructionHelper<2, 2, 0>
+{
+ public:
+ LIR_HEADER(BitAndAndBranch)
+ LBitAndAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ const LAllocation* left() {
+ return getOperand(0);
+ }
+ const LAllocation* right() {
+ return getOperand(1);
+ }
+};
+
+// Takes a value and tests whether it is null, undefined, or is an object that
+// emulates |undefined|, as determined by the JSCLASS_EMULATES_UNDEFINED class
+// flag on unwrapped objects. See also js::EmulatesUndefined.
+class LIsNullOrLikeUndefinedV : public LInstructionHelper<1, BOX_PIECES, 2>
+{
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedV)
+
+ LIsNullOrLikeUndefinedV(const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ {
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(1);
+ }
+};
+
+// Takes an object or object-or-null pointer and tests whether it is null or is
+// an object that emulates |undefined|, as above.
+class LIsNullOrLikeUndefinedT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedT)
+
+ explicit LIsNullOrLikeUndefinedT(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MCompare* mir() {
+ return mir_->toCompare();
+ }
+};
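+
+// For example, the one common object carrying JSCLASS_EMULATES_UNDEFINED is
+// document.all, so at the JS level these nodes implement behaviour like:
+//
+//   document.all == null;       // true
+//   document.all == undefined;  // true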
+
+class LIsNullOrLikeUndefinedAndBranchV : public LControlInstructionHelper<2, BOX_PIECES, 2>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchV)
+
+ LIsNullOrLikeUndefinedAndBranchV(MCompare* cmpMir, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ : cmpMir_(cmpMir)
+ {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* tempToUnbox() {
+ return getTemp(1);
+ }
+};
+
+class LIsNullOrLikeUndefinedAndBranchT : public LControlInstructionHelper<2, 1, 1>
+{
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchT)
+
+ LIsNullOrLikeUndefinedAndBranchT(MCompare* cmpMir, const LAllocation& input,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LDefinition& temp)
+ : cmpMir_(cmpMir)
+ {
+ setOperand(0, input);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setTemp(0, temp);
+ }
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+ MTest* mir() const {
+ return mir_->toTest();
+ }
+ MCompare* cmpMir() const {
+ return cmpMir_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Not operation on an integer.
+class LNotI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotI)
+
+ explicit LNotI(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Not operation on an int64.
+class LNotI64 : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(NotI64)
+
+ explicit LNotI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+// Not operation on a double.
+class LNotD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotD)
+
+ explicit LNotD(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Not operation on a float32.
+class LNotF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotF)
+
+ explicit LNotF(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Boolean complement operation on an object.
+class LNotO : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NotO)
+
+ explicit LNotO(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Boolean complement operation on a value.
+class LNotV : public LInstructionHelper<1, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(NotV)
+
+ static const size_t Input = 0;
+ LNotV(const LBoxAllocation& input, const LDefinition& temp0, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+
+ MNot* mir() {
+ return mir_->toNot();
+ }
+};
+
+// Bitwise not operation, taking a 32-bit integer as input and returning
+// a 32-bit integer result as output.
+class LBitNotI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BitNotI)
+};
+
+// Call a VM function to perform a BITNOT operation.
+class LBitNotV : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(BitNotV)
+
+ static const size_t Input = 0;
+
+ explicit LBitNotV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+};
+
+// Binary bitwise operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LBitOpI : public LInstructionHelper<1, 2, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI)
+
+ explicit LBitOpI(JSOp op)
+ : op_(op)
+ { }
+
+ const char* extraName() const {
+ if (bitop() == JSOP_URSH && mir_->toUrsh()->bailoutsDisabled())
+ return "ursh:BailoutsDisabled";
+ return CodeName[op_];
+ }
+
+ JSOp bitop() const {
+ return op_;
+ }
+};
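+
+// Note on the "ursh" case: x >>> y produces a uint32, and for example
+// (-1 >>> 0) === 4294967295 does not fit in the int32 result, so the
+// instruction bails out via its snapshot. Once such bailouts have been
+// disabled, the shift is instead lowered to LUrshD (below), which produces
+// a double.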
+
+class LBitOpI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ explicit LBitOpI64(JSOp op)
+ : op_(op)
+ { }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+
+ JSOp bitop() const {
+ return op_;
+ }
+};
+
+// Call a VM function to perform a bitwise operation.
+class LBitOpV : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(BitOpV)
+
+ LBitOpV(JSOp jsop, const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : jsop_(jsop)
+ {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+// Shift operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LShiftI : public LBinaryMath<0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI)
+
+ explicit LShiftI(JSOp op)
+ : op_(op)
+ { }
+
+ JSOp bitop() {
+ return op_;
+ }
+
+ MInstruction* mir() {
+ return mir_->toInstruction();
+ }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+};
+
+class LShiftI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>
+{
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI64)
+
+ explicit LShiftI64(JSOp op)
+ : op_(op)
+ { }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ JSOp bitop() {
+ return op_;
+ }
+
+ MInstruction* mir() {
+ return mir_->toInstruction();
+ }
+
+ const char* extraName() const {
+ return CodeName[op_];
+ }
+};
+
+// Sign extension
+class LSignExtend : public LInstructionHelper<1, 1, 0>
+{
+ MSignExtend::Mode mode_;
+
+ public:
+ LIR_HEADER(SignExtend);
+ explicit LSignExtend(const LAllocation& num, MSignExtend::Mode mode)
+ : mode_(mode)
+ {
+ setOperand(0, num);
+ }
+
+ MSignExtend::Mode mode() { return mode_; }
+};
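+
+// Worked example, assuming the usual Byte/Half extension modes: in Byte mode
+// the low 8 bits are reinterpreted as signed, so 0xFF -> -1 and 0x7F -> 127;
+// in Half mode the low 16 bits are used, so 0xFFFF -> -1 and 0x7FFF -> 32767.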
+
+class LUrshD : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(UrshD)
+
+ LUrshD(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Returns from the function being compiled (not used in inlined frames). The
+// input must be a box.
+class LReturn : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Return)
+};
+
+class LThrow : public LCallInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Throw)
+
+ static const size_t Value = 0;
+
+ explicit LThrow(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+};
+
+class LMinMaxBase : public LInstructionHelper<1, 2, 0>
+{
+ protected:
+ LMinMaxBase(const LAllocation& first, const LAllocation& second)
+ {
+ setOperand(0, first);
+ setOperand(1, second);
+ }
+
+ public:
+ const LAllocation* first() {
+ return this->getOperand(0);
+ }
+ const LAllocation* second() {
+ return this->getOperand(1);
+ }
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ MMinMax* mir() const {
+ return mir_->toMinMax();
+ }
+ const char* extraName() const {
+ return mir()->isMax() ? "Max" : "Min";
+ }
+};
+
+class LMinMaxI : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxI)
+ LMinMaxI(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+class LMinMaxD : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxD)
+ LMinMaxD(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+class LMinMaxF : public LMinMaxBase
+{
+ public:
+ LIR_HEADER(MinMaxF)
+ LMinMaxF(const LAllocation& first, const LAllocation& second) : LMinMaxBase(first, second)
+ {}
+};
+
+// Negative of an integer
+class LNegI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegI);
+ explicit LNegI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a double.
+class LNegD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegD)
+ explicit LNegD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a float32.
+class LNegF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(NegF)
+ explicit LNegF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of an integer.
+class LAbsI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsI)
+ explicit LAbsI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of a double.
+class LAbsD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsD)
+ explicit LAbsD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of a float32.
+class LAbsF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AbsF)
+ explicit LAbsF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Copysign for doubles.
+class LCopySignD : public LInstructionHelper<1, 2, 2>
+{
+ public:
+ LIR_HEADER(CopySignD)
+ explicit LCopySignD() {}
+};
+
+// Copysign for float32.
+class LCopySignF : public LInstructionHelper<1, 2, 2>
+{
+ public:
+ LIR_HEADER(CopySignF)
+ explicit LCopySignF() {}
+};
+
+// Count leading zeroes on an int32.
+class LClzI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ClzI)
+ explicit LClzI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+
+ MClz* mir() const {
+ return mir_->toClz();
+ }
+};
+
+// Count leading zeroes on an int64.
+class LClzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ClzI64)
+ explicit LClzI64(const LInt64Allocation& num) {
+ setInt64Operand(0, num);
+ }
+
+ MClz* mir() const {
+ return mir_->toClz();
+ }
+};
+
+// Count trailing zeroes on an int32.
+class LCtzI : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CtzI)
+ explicit LCtzI(const LAllocation& num) {
+ setOperand(0, num);
+ }
+
+ MCtz* mir() const {
+ return mir_->toCtz();
+ }
+};
+
+// Count trailing zeroes on an int64.
+class LCtzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CtzI64)
+ explicit LCtzI64(const LInt64Allocation& num) {
+ setInt64Operand(0, num);
+ }
+
+ MCtz* mir() const {
+ return mir_->toCtz();
+ }
+};
+
+// Count population on an int32.
+class LPopcntI : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(PopcntI)
+ explicit LPopcntI(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const {
+ return mir_->toPopcnt();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Count population on an int64.
+class LPopcntI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PopcntI64)
+ explicit LPopcntI64(const LInt64Allocation& num, const LDefinition& temp) {
+ setInt64Operand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const {
+ return mir_->toPopcnt();
+ }
+};
+
+// Square root of a double.
+class LSqrtD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SqrtD)
+ explicit LSqrtD(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Square root of a float32.
+class LSqrtF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(SqrtF)
+ explicit LSqrtF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+class LAtan2D : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(Atan2D)
+ LAtan2D(const LAllocation& y, const LAllocation& x, const LDefinition& temp) {
+ setOperand(0, y);
+ setOperand(1, x);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* y() {
+ return getOperand(0);
+ }
+
+ const LAllocation* x() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+};
+
+class LHypot : public LCallInstructionHelper<1, 4, 1>
+{
+ uint32_t numOperands_;
+ public:
+ LIR_HEADER(Hypot)
+ LHypot(const LAllocation& x, const LAllocation& y, const LDefinition& temp)
+ : numOperands_(2)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z, const LDefinition& temp)
+ : numOperands_(3)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z, const LAllocation& w, const LDefinition& temp)
+ : numOperands_(4)
+ {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+ setTemp(0, temp);
+ }
+
+ uint32_t numArgs() const { return numOperands_; }
+
+ const LAllocation* x() {
+ return getOperand(0);
+ }
+
+ const LAllocation* y() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const LDefinition* output() {
+ return getDef(0);
+ }
+};
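+
+// Worked example: Math.hypot accepts a variable number of arguments, and
+// numOperands_ records how many of the four operand slots are in use, e.g.
+// Math.hypot(3, 4) === 5 uses two operands and Math.hypot(2, 3, 6) === 7
+// uses three.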
+
+// Double raised to an integer power.
+class LPowI : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(PowI)
+ LPowI(const LAllocation& value, const LAllocation& power, const LDefinition& temp) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+ const LAllocation* power() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Double raised to a double power.
+class LPowD : public LCallInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(PowD)
+ LPowD(const LAllocation& value, const LAllocation& power, const LDefinition& temp) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+ const LAllocation* power() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LMathFunctionD : public LCallInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(MathFunctionD)
+ LMathFunctionD(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MMathFunction* mir() const {
+ return mir_->toMathFunction();
+ }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+class LMathFunctionF : public LCallInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(MathFunctionF)
+ LMathFunctionF(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MMathFunction* mir() const {
+ return mir_->toMathFunction();
+ }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+// Adds two integers, returning an integer value.
+class LAddI : public LBinaryMath<0>
+{
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(AddI)
+
+ LAddI()
+ : recoversInput_(false)
+ { }
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ virtual bool recoversInput() const {
+ return recoversInput_;
+ }
+ void setRecoversInput() {
+ recoversInput_ = true;
+ }
+
+ MAdd* mir() const {
+ return mir_->toAdd();
+ }
+};
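+
+// Worked example of the "OverflowCheck" case: INT32_MAX + 1 does not fit in
+// the int32 result, so the instruction bails out through its snapshot and the
+// addition is redone outside Ion as a double (2147483648). recoversInput()
+// signals that an input can be rematerialized from the result
+// (lhs == result - rhs), so it need not be kept alive just for the bailout
+// path.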
+
+class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(AddI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Subtracts two integers, returning an integer value.
+class LSubI : public LBinaryMath<0>
+{
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(SubI)
+
+ LSubI()
+ : recoversInput_(false)
+ { }
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ virtual bool recoversInput() const {
+ return recoversInput_;
+ }
+ void setRecoversInput() {
+ recoversInput_ = true;
+ }
+ MSub* mir() const {
+ return mir_->toSub();
+ }
+};
+
+class LSubI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(SubI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+class LMulI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 1>
+{
+ public:
+ LIR_HEADER(MulI64)
+
+ explicit LMulI64()
+ {
+ setTemp(0, LDefinition());
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Performs an add, sub, mul, or div on two double values.
+class LMathD : public LBinaryMath<0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathD)
+
+ explicit LMathD(JSOp jsop)
+ : jsop_(jsop)
+ { }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+// Performs an add, sub, mul, or div on two float32 values.
+class LMathF: public LBinaryMath<0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathF)
+
+ explicit LMathF(JSOp jsop)
+ : jsop_(jsop)
+ { }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+};
+
+class LModD : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModD)
+
+ LModD(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ bool isCall() const {
+ return true;
+ }
+};
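+
+// Note on isCall(): the supported targets have no double-modulo instruction,
+// so this node is generated as an out-of-line call (typically to fmod via
+// callWithABI), which is why it reports itself as a call even though it is
+// not an LCallInstructionHelper.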
+
+// Call a VM function to perform a binary operation.
+class LBinaryV : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(BinaryV)
+
+ LBinaryV(JSOp jsop, const LBoxAllocation& lhs, const LBoxAllocation& rhs)
+ : jsop_(jsop)
+ {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ JSOp jsop() const {
+ return jsop_;
+ }
+
+ const char* extraName() const {
+ return CodeName[jsop_];
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+// Adds two strings, returning a string.
+class LConcat : public LInstructionHelper<1, 2, 5>
+{
+ public:
+ LIR_HEADER(Concat)
+
+ LConcat(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3, const LDefinition& temp4,
+ const LDefinition& temp5)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ setTemp(3, temp4);
+ setTemp(4, temp5);
+ }
+
+ const LAllocation* lhs() {
+ return this->getOperand(0);
+ }
+ const LAllocation* rhs() {
+ return this->getOperand(1);
+ }
+ const LDefinition* temp1() {
+ return this->getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return this->getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return this->getTemp(2);
+ }
+ const LDefinition* temp4() {
+ return this->getTemp(3);
+ }
+ const LDefinition* temp5() {
+ return this->getTemp(4);
+ }
+};
+
+// Get uint16 character code from a string.
+class LCharCodeAt : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(CharCodeAt)
+
+ LCharCodeAt(const LAllocation& str, const LAllocation& index) {
+ setOperand(0, str);
+ setOperand(1, index);
+ }
+
+ const LAllocation* str() {
+ return this->getOperand(0);
+ }
+ const LAllocation* index() {
+ return this->getOperand(1);
+ }
+};
+
+// Convert uint16 character code to a string.
+class LFromCharCode : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FromCharCode)
+
+ explicit LFromCharCode(const LAllocation& code) {
+ setOperand(0, code);
+ }
+
+ const LAllocation* code() {
+ return this->getOperand(0);
+ }
+};
+
+// Convert uint32 code point to a string.
+class LFromCodePoint : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FromCodePoint)
+
+ explicit LFromCodePoint(const LAllocation& codePoint) {
+ setOperand(0, codePoint);
+ }
+
+ const LAllocation* codePoint() {
+ return this->getOperand(0);
+ }
+};
+
+// Calculates sincos(x) and returns two values (sin/cos).
+class LSinCos : public LCallInstructionHelper<2, 1, 2>
+{
+ public:
+ LIR_HEADER(SinCos)
+
+ LSinCos(const LAllocation &input, const LDefinition &temp, const LDefinition &temp2)
+ {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+ const LAllocation *input() {
+ return getOperand(0);
+ }
+ const LDefinition *outputSin() {
+ return getDef(0);
+ }
+ const LDefinition *outputCos() {
+ return getDef(1);
+ }
+ const LDefinition *temp() {
+ return getTemp(0);
+ }
+ const LDefinition *temp2() {
+ return getTemp(1);
+ }
+ const MSinCos *mir() const {
+ return mir_->toSinCos();
+ }
+};
+
+class LStringSplit : public LCallInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(StringSplit)
+
+ LStringSplit(const LAllocation& string, const LAllocation& separator) {
+ setOperand(0, string);
+ setOperand(1, separator);
+ }
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* separator() {
+ return getOperand(1);
+ }
+ const MStringSplit* mir() const {
+ return mir_->toStringSplit();
+ }
+};
+
+class LSubstr : public LInstructionHelper<1, 3, 3>
+{
+ public:
+ LIR_HEADER(Substr)
+
+ LSubstr(const LAllocation& string, const LAllocation& begin, const LAllocation& length,
+ const LDefinition& temp, const LDefinition& temp2, const LDefinition& temp3)
+ {
+ setOperand(0, string);
+ setOperand(1, begin);
+ setOperand(2, length);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* begin() {
+ return getOperand(1);
+ }
+ const LAllocation* length() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ const MStringSplit* mir() const {
+ return mir_->toStringSplit();
+ }
+};
+
+// Convert a 32-bit integer to a double.
+class LInt32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32ToDouble)
+
+ explicit LInt32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit float to a double.
+class LFloat32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Float32ToDouble)
+
+ explicit LFloat32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a double to a 32-bit float.
+class LDoubleToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(DoubleToFloat32)
+
+ explicit LDoubleToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit integer to a float32.
+class LInt32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32ToFloat32)
+
+ explicit LInt32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a value to a double.
+class LValueToDouble : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToDouble)
+ static const size_t Input = 0;
+
+ explicit LValueToDouble(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MToDouble* mir() {
+ return mir_->toToDouble();
+ }
+};
+
+// Convert a value to a float32.
+class LValueToFloat32 : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToFloat32)
+ static const size_t Input = 0;
+
+ explicit LValueToFloat32(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MToFloat32* mir() {
+ return mir_->toToFloat32();
+ }
+};
+
+// Convert a value to an int32.
+// Input: components of a Value
+// Output: 32-bit integer
+// Bailout: undefined, string, object, or non-int32 double
+// Temps: one float register, one GP register
+//
+// This instruction requires a temporary float register.
+class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2>
+{
+ public:
+ enum Mode {
+ NORMAL,
+ TRUNCATE
+ };
+
+ private:
+ Mode mode_;
+
+ public:
+ LIR_HEADER(ValueToInt32)
+
+ LValueToInt32(const LBoxAllocation& input, const LDefinition& temp0, const LDefinition& temp1,
+ Mode mode)
+ : mode_(mode)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mode() == NORMAL ? "Normal" : "Truncate";
+ }
+
+ static const size_t Input = 0;
+
+ Mode mode() const {
+ return mode_;
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(1);
+ }
+ MToInt32* mirNormal() const {
+ MOZ_ASSERT(mode_ == NORMAL);
+ return mir_->toToInt32();
+ }
+ MTruncateToInt32* mirTruncate() const {
+ MOZ_ASSERT(mode_ == TRUNCATE);
+ return mir_->toTruncateToInt32();
+ }
+ MInstruction* mir() const {
+ return mir_->toInstruction();
+ }
+};
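+
+// Worked example of the two modes: NORMAL converts only int32 values and
+// int32-valued doubles (1.0 -> 1) and bails out on anything else, e.g. 1.5,
+// NaN, strings or objects. TRUNCATE applies ECMAScript ToInt32 truncation
+// instead, so 1.5 -> 1, -2.9 -> -2, NaN -> 0 and 4294967296 -> 0.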
+
+// Convert a double to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the double cannot be converted to an integer.
+class LDoubleToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(DoubleToInt32)
+
+ explicit LDoubleToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MToInt32* mir() const {
+ return mir_->toToInt32();
+ }
+};
+
+// Convert a float32 to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the float32 cannot be converted to an integer.
+class LFloat32ToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Float32ToInt32)
+
+ explicit LFloat32ToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MToInt32* mir() const {
+ return mir_->toToInt32();
+ }
+};
+
+// Convert a double to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateDToInt32 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(TruncateDToInt32)
+
+ LTruncateDToInt32(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ MTruncateToInt32* mir() const {
+ return mir_->toTruncateToInt32();
+ }
+};
+
+// Convert a float32 to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateFToInt32 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(TruncateFToInt32)
+
+ LTruncateFToInt32(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+
+ MTruncateToInt32* mir() const {
+ return mir_->toTruncateToInt32();
+ }
+};
+
+class LWasmTruncateToInt32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt32)
+
+ explicit LWasmTruncateToInt32(const LAllocation& in) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt32* mir() const {
+ return mir_->toWasmTruncateToInt32();
+ }
+};
+
+class LWrapInt64ToInt32 : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WrapInt64ToInt32)
+
+ static const size_t Input = 0;
+
+ explicit LWrapInt64ToInt32(const LInt64Allocation& input) {
+ setInt64Operand(Input, input);
+ }
+
+ const MWrapInt64ToInt32* mir() {
+ return mir_->toWrapInt64ToInt32();
+ }
+};
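+
+// Worked example: wrapping keeps the low 32 bits of the int64, i.e. the value
+// modulo 2^32 reinterpreted as signed, so 0x100000005 -> 5 and
+// 0xFFFFFFFF -> -1.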
+
+class LExtendInt32ToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(ExtendInt32ToInt64)
+
+ explicit LExtendInt32ToInt64(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MExtendInt32ToInt64* mir() {
+ return mir_->toExtendInt32ToInt64();
+ }
+};
+
+// Convert a boolean value to a string.
+class LBooleanToString : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BooleanToString)
+
+ explicit LBooleanToString(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert an integer hosted on one definition to a string with a function call.
+class LIntToString : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IntToString)
+
+ explicit LIntToString(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert a double hosted on one definition to a string with a function call.
+class LDoubleToString : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(DoubleToString)
+
+ LDoubleToString(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+};
+
+// Convert a primitive to a string with a function call.
+class LValueToString : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ValueToString)
+
+ LValueToString(const LBoxAllocation& input, const LDefinition& tempToUnbox)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const MToString* mir() {
+ return mir_->toToString();
+ }
+
+ const LDefinition* tempToUnbox() {
+ return getTemp(0);
+ }
+};
+
+// Convert a value to an object or null pointer.
+class LValueToObjectOrNull : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(ValueToObjectOrNull)
+
+ explicit LValueToObjectOrNull(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ const MToObjectOrNull* mir() {
+ return mir_->toToObjectOrNull();
+ }
+};
+
+class LInt32x4ToFloat32x4 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Int32x4ToFloat32x4);
+ explicit LInt32x4ToFloat32x4(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+class LFloat32x4ToInt32x4 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Float32x4ToInt32x4);
+ explicit LFloat32x4ToInt32x4(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MSimdConvert* mir() const {
+ return mir_->toSimdConvert();
+ }
+};
+
+// Float32x4 to Uint32x4 needs one GPR temp and one FloatReg temp.
+class LFloat32x4ToUint32x4 : public LInstructionHelper<1, 1, 2>
+{
+ public:
+ LIR_HEADER(Float32x4ToUint32x4);
+ explicit LFloat32x4ToUint32x4(const LAllocation& input, const LDefinition& tempR,
+ const LDefinition& tempF)
+ {
+ setOperand(0, input);
+ setTemp(0, tempR);
+ setTemp(1, tempF);
+ }
+ const LDefinition* tempR() {
+ return getTemp(0);
+ }
+ const LDefinition* tempF() {
+ return getTemp(1);
+ }
+ const MSimdConvert* mir() const {
+ return mir_->toSimdConvert();
+ }
+};
+
+// Double raised to a half power.
+class LPowHalfD : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(PowHalfD);
+ explicit LPowHalfD(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return getDef(0);
+ }
+ MPowHalf* mir() const {
+ return mir_->toPowHalf();
+ }
+};
+
+// No-op instruction that is used to hold the entry snapshot. This simplifies
+// register allocation as it doesn't need to sniff the snapshot out of the
+// LIRGraph.
+class LStart : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(Start)
+};
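+
+// Illustrative sketch (not part of the original patch): lowering of MStart is
+// conceptually just the following, which is what lets the register allocator
+// read the entry snapshot straight off the instruction. Helper and
+// bailout-kind names are assumed from LIRGenerator conventions, not verified
+// against this revision:
+//
+//   LStart* lir = new(alloc()) LStart();
+//   assignSnapshot(lir, Bailout_InitialState);  // attach the entry snapshot
+//   add(lir, start);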
+
+class LNaNToZero : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(NaNToZero)
+
+ explicit LNaNToZero(const LAllocation& input, const LDefinition& tempDouble) {
+ setOperand(0, input);
+ setTemp(0, tempDouble);
+ }
+
+ const MNaNToZero* mir() {
+ return mir_->toNaNToZero();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return getDef(0);
+ }
+ const LDefinition* tempDouble() {
+ return getTemp(0);
+ }
+};
+
+// Passed the BaselineFrame address in the OsrFrameReg by SideCannon().
+// Forwards this object to the LOsrValues for Value materialization.
+class LOsrEntry : public LInstructionHelper<1, 0, 1>
+{
+ protected:
+ Label label_;
+ uint32_t frameDepth_;
+
+ public:
+ LIR_HEADER(OsrEntry)
+
+ explicit LOsrEntry(const LDefinition& temp)
+ : frameDepth_(0)
+ {
+ setTemp(0, temp);
+ }
+
+ void setFrameDepth(uint32_t depth) {
+ frameDepth_ = depth;
+ }
+ uint32_t getFrameDepth() {
+ return frameDepth_;
+ }
+ Label* label() {
+ return &label_;
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Materialize a Value stored in an interpreter frame for OSR.
+class LOsrValue : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrValue)
+
+ explicit LOsrValue(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrValue* mir() {
+ return mir_->toOsrValue();
+ }
+};
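+
+// Illustrative sketch (not part of the original patch): each LOsrValue takes
+// the BaselineFrame pointer defined by LOsrEntry as its operand, roughly as
+// follows (helper and accessor names assumed, not verified against this
+// revision):
+//
+//   void LIRGenerator::visitOsrValue(MOsrValue* value) {
+//       defineBox(new(alloc()) LOsrValue(useRegister(value->entry())), value);
+//   }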
+
+// Materialize a JSObject env chain stored in an interpreter frame for OSR.
+class LOsrEnvironmentChain : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrEnvironmentChain)
+
+ explicit LOsrEnvironmentChain(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrEnvironmentChain* mir() {
+ return mir_->toOsrEnvironmentChain();
+ }
+};
+
+// Materialize the return value stored in an interpreter frame for OSR.
+class LOsrReturnValue : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrReturnValue)
+
+ explicit LOsrReturnValue(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrReturnValue* mir() {
+ return mir_->toOsrReturnValue();
+ }
+};
+
+// Materialize an ArgumentsObject stored in an interpreter frame for OSR.
+class LOsrArgumentsObject : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(OsrArgumentsObject)
+
+ explicit LOsrArgumentsObject(const LAllocation& entry)
+ {
+ setOperand(0, entry);
+ }
+
+ const MOsrArgumentsObject* mir() {
+ return mir_->toOsrArgumentsObject();
+ }
+};
+
+class LRegExp : public LCallInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(RegExp)
+
+ const MRegExp* mir() const {
+ return mir_->toRegExp();
+ }
+};
+
+class LRegExpMatcher : public LCallInstructionHelper<BOX_PIECES, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpMatcher)
+
+ LRegExpMatcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpMatcher* mir() const {
+ return mir_->toRegExpMatcher();
+ }
+};
+
+class LRegExpSearcher : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpSearcher)
+
+ LRegExpSearcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpSearcher* mir() const {
+ return mir_->toRegExpSearcher();
+ }
+};
+
+class LRegExpTester : public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(RegExpTester)
+
+ LRegExpTester(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() {
+ return getOperand(0);
+ }
+ const LAllocation* string() {
+ return getOperand(1);
+ }
+ const LAllocation* lastIndex() {
+ return getOperand(2);
+ }
+
+ const MRegExpTester* mir() const {
+ return mir_->toRegExpTester();
+ }
+};
+
+class LRegExpPrototypeOptimizable : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(RegExpPrototypeOptimizable);
+ explicit LRegExpPrototypeOptimizable(const LAllocation& object, const LDefinition& temp) {
+ setOperand(0, object);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRegExpPrototypeOptimizable* mir() const {
+ return mir_->toRegExpPrototypeOptimizable();
+ }
+};
+
+class LRegExpInstanceOptimizable : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(RegExpInstanceOptimizable);
+ explicit LRegExpInstanceOptimizable(const LAllocation& object, const LAllocation& proto,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, proto);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* proto() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRegExpInstanceOptimizable* mir() const {
+ return mir_->toRegExpInstanceOptimizable();
+ }
+};
+
+class LGetFirstDollarIndex : public LInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(GetFirstDollarIndex);
+ explicit LGetFirstDollarIndex(const LAllocation& str, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2) {
+ setOperand(0, str);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LAllocation* str() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+};
+
+class LStringReplace: public LCallInstructionHelper<1, 3, 0>
+{
+ public:
+ LIR_HEADER(StringReplace);
+
+ LStringReplace(const LAllocation& string, const LAllocation& pattern,
+ const LAllocation& replacement)
+ {
+ setOperand(0, string);
+ setOperand(1, pattern);
+ setOperand(2, replacement);
+ }
+
+ const MStringReplace* mir() const {
+ return mir_->toStringReplace();
+ }
+
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+ const LAllocation* pattern() {
+ return getOperand(1);
+ }
+ const LAllocation* replacement() {
+ return getOperand(2);
+ }
+};
+
+class LBinarySharedStub : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(BinarySharedStub)
+
+ LBinarySharedStub(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ const MBinarySharedStub* mir() const {
+ return mir_->toBinarySharedStub();
+ }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+class LUnarySharedStub : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(UnarySharedStub)
+
+ explicit LUnarySharedStub(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ const MUnarySharedStub* mir() const {
+ return mir_->toUnarySharedStub();
+ }
+
+ static const size_t Input = 0;
+};
+
+class LNullarySharedStub : public LCallInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(NullarySharedStub)
+
+ const MNullarySharedStub* mir() const {
+ return mir_->toNullarySharedStub();
+ }
+};
+
+class LLambdaForSingleton : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LambdaForSingleton)
+
+ explicit LLambdaForSingleton(const LAllocation& envChain)
+ {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MLambda* mir() const {
+ return mir_->toLambda();
+ }
+};
+
+class LLambda : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Lambda)
+
+ LLambda(const LAllocation& envChain, const LDefinition& temp) {
+ setOperand(0, envChain);
+ setTemp(0, temp);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MLambda* mir() const {
+ return mir_->toLambda();
+ }
+};
+
+class LLambdaArrow : public LInstructionHelper<1, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(LambdaArrow)
+
+ static const size_t NewTargetValue = 1;
+
+ LLambdaArrow(const LAllocation& envChain, const LBoxAllocation& newTarget) {
+ setOperand(0, envChain);
+ setBoxOperand(NewTargetValue, newTarget);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MLambdaArrow* mir() const {
+ return mir_->toLambdaArrow();
+ }
+};
+
+class LKeepAliveObject : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(KeepAliveObject)
+
+ explicit LKeepAliveObject(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load the "slots" member out of a JSObject.
+// Input: JSObject pointer
+// Output: slots pointer
+class LSlots : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Slots)
+
+ explicit LSlots(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load the "elements" member out of a JSObject.
+// Input: JSObject pointer
+// Output: elements pointer
+class LElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Elements)
+
+ explicit LElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const MElements* mir() const {
+ return mir_->toElements();
+ }
+};
+
+// If necessary, convert any int32 elements in a vector into doubles.
+class LConvertElementsToDoubles : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(ConvertElementsToDoubles)
+
+ explicit LConvertElementsToDoubles(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// If |elements| has the CONVERT_DOUBLE_ELEMENTS flag, convert int32 value to
+// double. Else return the original value.
+class LMaybeToDoubleElement : public LInstructionHelper<BOX_PIECES, 2, 1>
+{
+ public:
+ LIR_HEADER(MaybeToDoubleElement)
+
+ LMaybeToDoubleElement(const LAllocation& elements, const LAllocation& value,
+ const LDefinition& tempFloat) {
+ setOperand(0, elements);
+ setOperand(1, value);
+ setTemp(0, tempFloat);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+};
+
+// If necessary, copy the elements in an object so they may be written to.
+class LMaybeCopyElementsForWrite : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(MaybeCopyElementsForWrite)
+
+ explicit LMaybeCopyElementsForWrite(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ const MMaybeCopyElementsForWrite* mir() const {
+ return mir_->toMaybeCopyElementsForWrite();
+ }
+};
+
+// Load the initialized length from an elements header.
+class LInitializedLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(InitializedLength)
+
+ explicit LInitializedLength(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// Store to the initialized length in an elements header. Note the input is an
+// *index*, one less than the desired initialized length.
+class LSetInitializedLength : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(SetInitializedLength)
+
+ LSetInitializedLength(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
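+
+// Worked example (illustrative): after writing elements[4] of a dense array,
+// the desired initialized length is 5, so lowering passes index = 4 and
+// codegen stores index + 1.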
+
+class LUnboxedArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxedArrayLength)
+
+ explicit LUnboxedArrayLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LUnboxedArrayInitializedLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxedArrayInitializedLength)
+
+ explicit LUnboxedArrayInitializedLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LIncrementUnboxedArrayInitializedLength : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(IncrementUnboxedArrayInitializedLength)
+
+ explicit LIncrementUnboxedArrayInitializedLength(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+class LSetUnboxedArrayInitializedLength : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(SetUnboxedArrayInitializedLength)
+
+ explicit LSetUnboxedArrayInitializedLength(const LAllocation& object,
+ const LAllocation& length,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Load the length from an elements header.
+class LArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ArrayLength)
+
+ explicit LArrayLength(const LAllocation& elements) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+};
+
+// Store to the length in an elements header. Note the input is an *index*,
+// one less than the desired length.
+class LSetArrayLength : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(SetArrayLength)
+
+ LSetArrayLength(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LGetNextEntryForIterator : public LInstructionHelper<1, 2, 3>
+{
+ public:
+ LIR_HEADER(GetNextEntryForIterator)
+
+ explicit LGetNextEntryForIterator(const LAllocation& iter, const LAllocation& result,
+ const LDefinition& temp0, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, iter);
+ setOperand(1, result);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const MGetNextEntryForIterator* mir() const {
+ return mir_->toGetNextEntryForIterator();
+ }
+ const LAllocation* iter() {
+ return getOperand(0);
+ }
+ const LAllocation* result() {
+ return getOperand(1);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+ const LDefinition* temp2() {
+ return getTemp(2);
+ }
+};
+
+// Read the length of a typed array.
+class LTypedArrayLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedArrayLength)
+
+ explicit LTypedArrayLength(const LAllocation& obj) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load a typed array's elements vector.
+class LTypedArrayElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedArrayElements)
+
+ explicit LTypedArrayElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Assign
+//
+// target[targetOffset..targetOffset + source.length] = source[0..source.length]
+//
+// where the source element range doesn't overlap the target element range in
+// memory.
+class LSetDisjointTypedElements : public LCallInstructionHelper<0, 3, 1>
+{
+ public:
+ LIR_HEADER(SetDisjointTypedElements)
+
+ explicit LSetDisjointTypedElements(const LAllocation& target, const LAllocation& targetOffset,
+ const LAllocation& source, const LDefinition& temp)
+ {
+ setOperand(0, target);
+ setOperand(1, targetOffset);
+ setOperand(2, source);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* target() {
+ return getOperand(0);
+ }
+
+ const LAllocation* targetOffset() {
+ return getOperand(1);
+ }
+
+ const LAllocation* source() {
+ return getOperand(2);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Load a typed object's descriptor.
+class LTypedObjectDescr : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedObjectDescr)
+
+ explicit LTypedObjectDescr(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+};
+
+// Load a typed object's elements vector.
+class LTypedObjectElements : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(TypedObjectElements)
+
+ explicit LTypedObjectElements(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MTypedObjectElements* mir() const {
+ return mir_->toTypedObjectElements();
+ }
+};
+
+// Set a typed object's offset.
+class LSetTypedObjectOffset : public LInstructionHelper<0, 2, 2>
+{
+ public:
+ LIR_HEADER(SetTypedObjectOffset)
+
+ LSetTypedObjectOffset(const LAllocation& object,
+ const LAllocation& offset,
+ const LDefinition& temp0,
+ const LDefinition& temp1)
+ {
+ setOperand(0, object);
+ setOperand(1, offset);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* offset() {
+ return getOperand(1);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+// Bail out if index >= length.
+class LBoundsCheck : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(BoundsCheck)
+
+ LBoundsCheck(const LAllocation& index, const LAllocation& length) {
+ setOperand(0, index);
+ setOperand(1, length);
+ }
+ const MBoundsCheck* mir() const {
+ return mir_->toBoundsCheck();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+};
+
+// Bail out if index + minimum < 0 or index + maximum >= length.
+class LBoundsCheckRange : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(BoundsCheckRange)
+
+ LBoundsCheckRange(const LAllocation& index, const LAllocation& length,
+ const LDefinition& temp)
+ {
+ setOperand(0, index);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+ const MBoundsCheck* mir() const {
+ return mir_->toBoundsCheck();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+};
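+
+// Worked example (illustrative): for a hoisted check covering accesses from
+// index - 1 up to index + 2, the range is minimum = -1 and maximum = 2, so
+// this instruction bails out when index - 1 < 0 or index + 2 >= length.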
+
+// Bail out if index < minimum.
+class LBoundsCheckLower : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(BoundsCheckLower)
+
+ explicit LBoundsCheckLower(const LAllocation& index)
+ {
+ setOperand(0, index);
+ }
+ MBoundsCheckLower* mir() const {
+ return mir_->toBoundsCheckLower();
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+};
+
+// Load a value from a dense array's elements vector. Bail out if it's the hole value.
+class LLoadElementV : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadElementV)
+
+ LLoadElementV(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElement* mir() const {
+ return mir_->toLoadElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LInArray : public LInstructionHelper<1, 4, 0>
+{
+ public:
+ LIR_HEADER(InArray)
+
+ LInArray(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& initLength, const LAllocation& object)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ setOperand(3, object);
+ }
+ const MInArray* mir() const {
+ return mir_->toInArray();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* initLength() {
+ return getOperand(2);
+ }
+ const LAllocation* object() {
+ return getOperand(3);
+ }
+};
+
+// Load a value from an array's elements vector, loading |undefined| if we hit a hole.
+// Bail out if we get a negative index.
+class LLoadElementHole : public LInstructionHelper<BOX_PIECES, 3, 0>
+{
+ public:
+ LIR_HEADER(LoadElementHole)
+
+ LLoadElementHole(const LAllocation& elements, const LAllocation& index, const LAllocation& initLength) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElementHole* mir() const {
+ return mir_->toLoadElementHole();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* initLength() {
+ return getOperand(2);
+ }
+};
+
+// Load a typed value from a dense array's elements vector. The array must be
+// known to be packed, so that we don't have to check for the hole value.
+// This instruction does not load the type tag and can directly load into a
+// FP register.
+class LLoadElementT : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadElementT)
+
+ LLoadElementT(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck"
+ : (mir()->loadDoubles() ? "Doubles" : nullptr);
+ }
+
+ const MLoadElement* mir() const {
+ return mir_->toLoadElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedPointerV : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedPointerV)
+
+ LLoadUnboxedPointerV(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const MLoadUnboxedObjectOrNull* mir() const {
+ return mir_->toLoadUnboxedObjectOrNull();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedPointerT : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedPointerT)
+
+ LLoadUnboxedPointerT(const LAllocation& elements, const LAllocation& index) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ MDefinition* mir() {
+ MOZ_ASSERT(mir_->isLoadUnboxedObjectOrNull() || mir_->isLoadUnboxedString());
+ return mir_;
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LUnboxObjectOrNull : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(UnboxObjectOrNull);
+
+ explicit LUnboxObjectOrNull(const LAllocation& input)
+ {
+ setOperand(0, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+// Store a boxed value to a dense array's element vector.
+class LStoreElementV : public LInstructionHelper<0, 2 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreElementV)
+
+ LStoreElementV(const LAllocation& elements, const LAllocation& index,
+ const LBoxAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setBoxOperand(Value, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ static const size_t Value = 2;
+
+ const MStoreElement* mir() const {
+ return mir_->toStoreElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+// Store a typed value to a dense array's elements vector. Compared to
+// LStoreElementV, this instruction can store doubles and constants directly,
+// and does not store the type tag if the array is monomorphic and known to
+// be packed.
+class LStoreElementT : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreElementT)
+
+ LStoreElementT(const LAllocation& elements, const LAllocation& index, const LAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MStoreElement* mir() const {
+ return mir_->toStoreElement();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
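+
+// Illustrative sketch (not part of the original patch): lowering typically
+// picks the T form when the value has a known MIRType and the V form for a
+// boxed Value, along these lines (helper names assumed from LIRGenerator
+// conventions, with |elements| and |index| already lowered):
+//
+//   if (ins->value()->type() == MIRType::Value) {
+//       add(new(alloc()) LStoreElementV(elements, index, useBox(ins->value())),
+//           ins);
+//   } else {
+//       add(new(alloc()) LStoreElementT(elements, index,
+//                                       useRegisterOrConstant(ins->value())),
+//           ins);
+//   }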
+
+// Like LStoreElementV, but supports indexes >= initialized length.
+class LStoreElementHoleV : public LInstructionHelper<0, 3 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(StoreElementHoleV)
+
+ LStoreElementHoleV(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 3;
+
+ const MStoreElementHole* mir() const {
+ return mir_->toStoreElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+};
+
+// Like LStoreElementT, but supports indexes >= initialized length.
+class LStoreElementHoleT : public LInstructionHelper<0, 4, 1>
+{
+ public:
+ LIR_HEADER(StoreElementHoleT)
+
+ LStoreElementHoleT(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, temp);
+ }
+
+ const MStoreElementHole* mir() const {
+ return mir_->toStoreElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+// Like LStoreElementV, but can just ignore assignment (e.g. for frozen objects)
+class LFallibleStoreElementV : public LInstructionHelper<0, 3 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(FallibleStoreElementV)
+
+ LFallibleStoreElementV(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 3;
+
+ const MFallibleStoreElement* mir() const {
+ return mir_->toFallibleStoreElement();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+};
+
+// Like LStoreElementT, but can just ignore assignment (e.g. for frozen objects)
+class LFallibleStoreElementT : public LInstructionHelper<0, 4, 1>
+{
+ public:
+ LIR_HEADER(FallibleStoreElementT)
+
+ LFallibleStoreElementT(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, temp);
+ }
+
+ const MFallibleStoreElement* mir() const {
+ return mir_->toFallibleStoreElement();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* elements() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+class LStoreUnboxedPointer : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreUnboxedPointer)
+
+ LStoreUnboxedPointer(LAllocation elements, LAllocation index, LAllocation value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ MDefinition* mir() {
+ MOZ_ASSERT(mir_->isStoreUnboxedObjectOrNull() || mir_->isStoreUnboxedString());
+ return mir_;
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
+
+// If necessary, convert an unboxed object in a particular group to its native
+// representation.
+class LConvertUnboxedObjectToNative : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(ConvertUnboxedObjectToNative)
+
+ explicit LConvertUnboxedObjectToNative(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ MConvertUnboxedObjectToNative* mir() {
+ return mir_->toConvertUnboxedObjectToNative();
+ }
+};
+
+class LArrayPopShiftV : public LInstructionHelper<BOX_PIECES, 1, 2>
+{
+ public:
+ LIR_HEADER(ArrayPopShiftV)
+
+ LArrayPopShiftV(const LAllocation& object, const LDefinition& temp0, const LDefinition& temp1) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const {
+ return mir_->toArrayPopShift();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+class LArrayPopShiftT : public LInstructionHelper<1, 1, 2>
+{
+ public:
+ LIR_HEADER(ArrayPopShiftT)
+
+ LArrayPopShiftT(const LAllocation& object, const LDefinition& temp0, const LDefinition& temp1) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const {
+ return mir_->toArrayPopShift();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+};
+
+class LArrayPushV : public LInstructionHelper<1, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ArrayPushV)
+
+ LArrayPushV(const LAllocation& object, const LBoxAllocation& value, const LDefinition& temp) {
+ setOperand(0, object);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 1;
+
+ const MArrayPush* mir() const {
+ return mir_->toArrayPush();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LArrayPushT : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(ArrayPushT)
+
+ LArrayPushT(const LAllocation& object, const LAllocation& value, const LDefinition& temp) {
+ setOperand(0, object);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MArrayPush* mir() const {
+ return mir_->toArrayPush();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LArraySlice : public LCallInstructionHelper<1, 3, 2>
+{
+ public:
+ LIR_HEADER(ArraySlice)
+
+ LArraySlice(const LAllocation& obj, const LAllocation& begin, const LAllocation& end,
+ const LDefinition& temp1, const LDefinition& temp2) {
+ setOperand(0, obj);
+ setOperand(1, begin);
+ setOperand(2, end);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const MArraySlice* mir() const {
+ return mir_->toArraySlice();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* begin() {
+ return getOperand(1);
+ }
+ const LAllocation* end() {
+ return getOperand(2);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+};
+
+class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(ArrayJoin)
+
+ LArrayJoin(const LAllocation& array, const LAllocation& sep) {
+ setOperand(0, array);
+ setOperand(1, sep);
+ }
+
+ const MArrayJoin* mir() const {
+ return mir_->toArrayJoin();
+ }
+ const LAllocation* array() {
+ return getOperand(0);
+ }
+ const LAllocation* separator() {
+ return getOperand(1);
+ }
+};
+
+class LLoadUnboxedScalar : public LInstructionHelper<1, 2, 1>
+{
+ public:
+ LIR_HEADER(LoadUnboxedScalar)
+
+ LLoadUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+ const MLoadUnboxedScalar* mir() const {
+ return mir_->toLoadUnboxedScalar();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+class LLoadTypedArrayElementHole : public LInstructionHelper<BOX_PIECES, 2, 0>
+{
+ public:
+ LIR_HEADER(LoadTypedArrayElementHole)
+
+ LLoadTypedArrayElementHole(const LAllocation& object, const LAllocation& index) {
+ setOperand(0, object);
+ setOperand(1, index);
+ }
+ const MLoadTypedArrayElementHole* mir() const {
+ return mir_->toLoadTypedArrayElementHole();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LLoadTypedArrayElementStatic : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadTypedArrayElementStatic);
+ explicit LLoadTypedArrayElementStatic(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MLoadTypedArrayElementStatic* mir() const {
+ return mir_->toLoadTypedArrayElementStatic();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+class LStoreUnboxedScalar : public LInstructionHelper<0, 3, 0>
+{
+ public:
+ LIR_HEADER(StoreUnboxedScalar)
+
+ LStoreUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+};
+
+class LStoreTypedArrayElementHole : public LInstructionHelper<0, 4, 0>
+{
+ public:
+ LIR_HEADER(StoreTypedArrayElementHole)
+
+ LStoreTypedArrayElementHole(const LAllocation& elements, const LAllocation& length,
+ const LAllocation& index, const LAllocation& value)
+ {
+ setOperand(0, elements);
+ setOperand(1, length);
+ setOperand(2, index);
+ setOperand(3, value);
+ }
+
+ const MStoreTypedArrayElementHole* mir() const {
+ return mir_->toStoreTypedArrayElementHole();
+ }
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* length() {
+ return getOperand(1);
+ }
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+ const LAllocation* value() {
+ return getOperand(3);
+ }
+};
+
+class LStoreTypedArrayElementStatic : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreTypedArrayElementStatic);
+ LStoreTypedArrayElementStatic(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ }
+ MStoreTypedArrayElementStatic* mir() const {
+ return mir_->toStoreTypedArrayElementStatic();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+class LAtomicIsLockFree : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AtomicIsLockFree)
+
+ explicit LAtomicIsLockFree(const LAllocation& value) {
+ setOperand(0, value);
+ }
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+};
+
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4>
+{
+ public:
+ LIR_HEADER(CompareExchangeTypedArrayElement)
+
+ LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ }
+ LCompareExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* oldval() {
+ return getOperand(2);
+ }
+ const LAllocation* newval() {
+ return getOperand(3);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MCompareExchangeTypedArrayElement* mir() const {
+ return mir_->toCompareExchangeTypedArrayElement();
+ }
+};
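+
+// Illustrative sketch (not part of the original patch): on LL/SC targets a
+// uint8/uint16 compare-exchange is typically synthesized on the containing
+// aligned 32-bit word, which is what valueTemp/offsetTemp/maskTemp are for.
+// Conceptually (pseudocode, not this codebase's MacroAssembler API):
+//
+//   offsetTemp = (addr & 3) * 8;           // bit offset of the lane in the word
+//   maskTemp   = laneMask << offsetTemp;   // e.g. 0xff for uint8 lanes
+//   do {
+//       word = load_linked(addr & ~3);
+//       if (((word & maskTemp) >> offsetTemp) != oldval)
+//           break;
+//       valueTemp = (word & ~maskTemp) | (newval << offsetTemp);
+//   } while (!store_conditional(addr & ~3, valueTemp));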
+
+class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4>
+{
+ public:
+ LIR_HEADER(AtomicExchangeTypedArrayElement)
+
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ }
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MAtomicExchangeTypedArrayElement* mir() const {
+ return mir_->toAtomicExchangeTypedArrayElement();
+ }
+};
+
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5>
+{
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinop)
+
+ static const int32_t valueOp = 2;
+
+ LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ LAtomicTypedArrayElementBinop(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 2);
+ return getOperand(2);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(4);
+ }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAtomicTypedArrayElementBinopForEffect : public LInstructionHelper<0, 3, 4>
+{
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
+
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ }
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LDefinition& flagTemp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+ const LAllocation* value() {
+ return getOperand(2);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(0);
+ }
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+class LEffectiveAddress : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(EffectiveAddress);
+
+ LEffectiveAddress(const LAllocation& base, const LAllocation& index) {
+ setOperand(0, base);
+ setOperand(1, index);
+ }
+ const MEffectiveAddress* mir() const {
+ return mir_->toEffectiveAddress();
+ }
+ const LAllocation* base() {
+ return getOperand(0);
+ }
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+};
+
+class LClampIToUint8 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(ClampIToUint8)
+
+ explicit LClampIToUint8(const LAllocation& in) {
+ setOperand(0, in);
+ }
+};
+
+class LClampDToUint8 : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(ClampDToUint8)
+
+ LClampDToUint8(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+};
+
+class LClampVToUint8 : public LInstructionHelper<1, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(ClampVToUint8)
+
+ LClampVToUint8(const LBoxAllocation& input, const LDefinition& tempFloat) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempFloat);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() {
+ return getTemp(0);
+ }
+ const MClampToUint8* mir() const {
+ return mir_->toClampToUint8();
+ }
+};
+
+// Load a boxed value from an object's fixed slot.
+class LLoadFixedSlotV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotV)
+
+ explicit LLoadFixedSlotV(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const {
+ return mir_->toLoadFixedSlot();
+ }
+};
+
+// Load a typed value from an object's fixed slot.
+class LLoadFixedSlotT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotT)
+
+ explicit LLoadFixedSlotT(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const {
+ return mir_->toLoadFixedSlot();
+ }
+};
+
+class LLoadFixedSlotAndUnbox : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadFixedSlotAndUnbox)
+
+ explicit LLoadFixedSlotAndUnbox(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const MLoadFixedSlotAndUnbox* mir() const {
+ return mir_->toLoadFixedSlotAndUnbox();
+ }
+};
+
+// Store a boxed value to an object's fixed slot.
+class LStoreFixedSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreFixedSlotV)
+
+ LStoreFixedSlotV(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreFixedSlot* mir() const {
+ return mir_->toStoreFixedSlot();
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+};
+
+// Store a typed value to an object's fixed slot.
+class LStoreFixedSlotT : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreFixedSlotT)
+
+ LStoreFixedSlotT(const LAllocation& obj, const LAllocation& value)
+ {
+ setOperand(0, obj);
+ setOperand(1, value);
+ }
+ const MStoreFixedSlot* mir() const {
+ return mir_->toStoreFixedSlot();
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+// Note: Name ICs always return a Value. There are no V/T variants.
+class LGetNameCache : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetNameCache)
+
+ explicit LGetNameCache(const LAllocation& envObj) {
+ setOperand(0, envObj);
+ }
+ const LAllocation* envObj() {
+ return getOperand(0);
+ }
+ const MGetNameCache* mir() const {
+ return mir_->toGetNameCache();
+ }
+};
+
+class LCallGetIntrinsicValue : public LCallInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(CallGetIntrinsicValue)
+
+ const MCallGetIntrinsicValue* mir() const {
+ return mir_->toCallGetIntrinsicValue();
+ }
+};
+
+// Patchable jump to stubs generated for a GetProperty cache, which loads a
+// boxed value.
+class LGetPropertyCacheV : public LInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyCacheV)
+
+ static const size_t Id = 1;
+
+ LGetPropertyCacheV(const LAllocation& object, const LBoxAllocation& id) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ }
+ const MGetPropertyCache* mir() const {
+ return mir_->toGetPropertyCache();
+ }
+};
+
+// Patchable jump to stubs generated for a GetProperty cache, which loads a
+// value of a known type, possibly into an FP register.
+class LGetPropertyCacheT : public LInstructionHelper<1, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyCacheT)
+
+ static const size_t Id = 1;
+
+ LGetPropertyCacheT(const LAllocation& object, const LBoxAllocation& id) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ }
+ const MGetPropertyCache* mir() const {
+ return mir_->toGetPropertyCache();
+ }
+};
+
+// Emit code to load a boxed value from an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LGetPropertyPolymorphicV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetPropertyPolymorphicV)
+
+ explicit LGetPropertyPolymorphicV(const LAllocation& obj) {
+ setOperand(0, obj);
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const MGetPropertyPolymorphic* mir() const {
+ return mir_->toGetPropertyPolymorphic();
+ }
+ virtual const char* extraName() const {
+ return PropertyNameToExtraName(mir()->name());
+ }
+};
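+
+// Illustrative sketch (not part of the original patch): codegen for the
+// polymorphic property ops is conceptually a linear chain of shape guards
+// (pseudocode, not the actual emitted assembly):
+//
+//   for each (shape_i, slot_i) observed by the Baseline IC:
+//       if (obj->shape() == shape_i) { output = load slot_i of obj; goto done; }
+//   bailout();   // no shape matched
+//   done: ...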
+
+// Emit code to load a typed value from an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LGetPropertyPolymorphicT : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(GetPropertyPolymorphicT)
+
+ LGetPropertyPolymorphicT(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MGetPropertyPolymorphic* mir() const {
+ return mir_->toGetPropertyPolymorphic();
+ }
+ virtual const char* extraName() const {
+ return PropertyNameToExtraName(mir()->name());
+ }
+};
+
+// Emit code to store a boxed value to an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LSetPropertyPolymorphicV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(SetPropertyPolymorphicV)
+
+ LSetPropertyPolymorphicV(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 1;
+
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MSetPropertyPolymorphic* mir() const {
+ return mir_->toSetPropertyPolymorphic();
+ }
+};
+
+// Emit code to store a typed value to an object's slots if its shape matches
+// one of the shapes observed by the baseline IC; bail out otherwise.
+class LSetPropertyPolymorphicT : public LInstructionHelper<0, 2, 1>
+{
+ MIRType valueType_;
+
+ public:
+ LIR_HEADER(SetPropertyPolymorphicT)
+
+ LSetPropertyPolymorphicT(const LAllocation& obj, const LAllocation& value, MIRType valueType,
+ const LDefinition& temp)
+ : valueType_(valueType)
+ {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* obj() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MIRType valueType() const {
+ return valueType_;
+ }
+ const MSetPropertyPolymorphic* mir() const {
+ return mir_->toSetPropertyPolymorphic();
+ }
+ const char* extraName() const {
+ return StringFromMIRType(valueType_);
+ }
+};
+
+class LBindNameCache : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(BindNameCache)
+
+ explicit LBindNameCache(const LAllocation& envChain) {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MBindNameCache* mir() const {
+ return mir_->toBindNameCache();
+ }
+};
+
+class LCallBindVar : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CallBindVar)
+
+ explicit LCallBindVar(const LAllocation& envChain) {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() {
+ return getOperand(0);
+ }
+ const MCallBindVar* mir() const {
+ return mir_->toCallBindVar();
+ }
+};
+
+// Load a value from an object's dslots or a slots vector.
+class LLoadSlotV : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadSlotV)
+
+ explicit LLoadSlotV(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const MLoadSlot* mir() const {
+ return mir_->toLoadSlot();
+ }
+};
+
+// Load a typed value from an object's dslots or a slots vector. Unlike
+// LLoadSlotV, this can bypass extracting a type tag, directly retrieving a
+// pointer, integer, or double.
+class LLoadSlotT : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadSlotT)
+
+ explicit LLoadSlotT(const LAllocation& slots) {
+ setOperand(0, slots);
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ const MLoadSlot* mir() const {
+ return mir_->toLoadSlot();
+ }
+};
+
+// Store a value to an object's dslots or a slots vector.
+class LStoreSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(StoreSlotV)
+
+ LStoreSlotV(const LAllocation& slots, const LBoxAllocation& value) {
+ setOperand(0, slots);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreSlot* mir() const {
+ return mir_->toStoreSlot();
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+};
+
+// Store a typed value to an object's dslots or a slots vector. This has a
+// few advantages over LStoreSlotV:
+// 1) We can bypass storing the type tag if the slot has the same type as
+// the value.
+// 2) Better register allocation: we can store constants and FP regs directly
+// without requiring a second register for the value.
+class LStoreSlotT : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(StoreSlotT)
+
+ LStoreSlotT(const LAllocation& slots, const LAllocation& value) {
+ setOperand(0, slots);
+ setOperand(1, value);
+ }
+ const MStoreSlot* mir() const {
+ return mir_->toStoreSlot();
+ }
+ const LAllocation* slots() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+// Read length field of a JSString*.
+class LStringLength : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(StringLength)
+
+ explicit LStringLength(const LAllocation& string) {
+ setOperand(0, string);
+ }
+
+ const LAllocation* string() {
+ return getOperand(0);
+ }
+};
+
+// Take the floor of a double precision number. Implements Math.floor().
+class LFloor : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Floor)
+
+ explicit LFloor(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the floor of a single precision number. Implements Math.floor().
+class LFloorF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FloorF)
+
+ explicit LFloorF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the ceiling of a double precision number. Implements Math.ceil().
+class LCeil : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(Ceil)
+
+ explicit LCeil(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Take the ceiling of a single precision number. Implements Math.ceil().
+class LCeilF : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CeilF)
+
+ explicit LCeilF(const LAllocation& num) {
+ setOperand(0, num);
+ }
+};
+
+// Round a double precision number. Implements Math.round().
+class LRound : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(Round)
+
+ LRound(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRound* mir() const {
+ return mir_->toRound();
+ }
+};
+
+// Round a single precision number. Implements Math.round().
+class LRoundF : public LInstructionHelper<1, 1, 1>
+{
+ public:
+ LIR_HEADER(RoundF)
+
+ LRoundF(const LAllocation& num, const LDefinition& temp) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MRound* mir() const {
+ return mir_->toRound();
+ }
+};
+
+// Load a function's call environment.
+class LFunctionEnvironment : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(FunctionEnvironment)
+
+ explicit LFunctionEnvironment(const LAllocation& function) {
+ setOperand(0, function);
+ }
+ const LAllocation* function() {
+ return getOperand(0);
+ }
+};
+
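+// Call a VM function to perform a generic property get.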
+class LCallGetProperty : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallGetProperty)
+
+ static const size_t Value = 0;
+
+ explicit LCallGetProperty(const LBoxAllocation& val) {
+ setBoxOperand(Value, val);
+ }
+
+ MCallGetProperty* mir() const {
+ return mir_->toCallGetProperty();
+ }
+};
+
+// Call js::GetElement.
+class LCallGetElement : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallGetElement)
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+
+ LCallGetElement(const LBoxAllocation& lhs, const LBoxAllocation& rhs) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ }
+
+ MCallGetElement* mir() const {
+ return mir_->toCallGetElement();
+ }
+};
+
+// Call js::SetElement.
+class LCallSetElement : public LCallInstructionHelper<0, 1 + 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallSetElement)
+
+ static const size_t Index = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ LCallSetElement(const LAllocation& obj, const LBoxAllocation& index,
+ const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Index, index);
+ setBoxOperand(Value, value);
+ }
+
+ const MCallSetElement* mir() const {
+ return mir_->toCallSetElement();
+ }
+};
+
+// Call js::InitElementArray.
+class LCallInitElementArray : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+  public:
+ LIR_HEADER(CallInitElementArray)
+
+ static const size_t Value = 1;
+
+ LCallInitElementArray(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ const MCallInitElementArray* mir() const {
+ return mir_->toCallInitElementArray();
+ }
+};
+
+// Call a VM function to perform a property or name assignment of a generic value.
+class LCallSetProperty : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallSetProperty)
+
+ LCallSetProperty(const LAllocation& obj, const LBoxAllocation& value) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MCallSetProperty* mir() const {
+ return mir_->toCallSetProperty();
+ }
+};
+
+class LCallDeleteProperty : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDeleteProperty)
+
+ static const size_t Value = 0;
+
+ explicit LCallDeleteProperty(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+
+ MDeleteProperty* mir() const {
+ return mir_->toDeleteProperty();
+ }
+};
+
+class LCallDeleteElement : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallDeleteElement)
+
+ static const size_t Value = 0;
+ static const size_t Index = BOX_PIECES;
+
+ LCallDeleteElement(const LBoxAllocation& value, const LBoxAllocation& index) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Index, index);
+ }
+
+ MDeleteElement* mir() const {
+ return mir_->toDeleteElement();
+ }
+};
+
+// Patchable jump to stubs generated for a SetProperty cache.
+class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 4>
+{
+ public:
+ LIR_HEADER(SetPropertyCache)
+
+ LSetPropertyCache(const LAllocation& object, const LBoxAllocation& id,
+ const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnboxIndex, const LDefinition& tempDouble,
+ const LDefinition& tempFloat32) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnboxIndex);
+ setTemp(2, tempDouble);
+ setTemp(3, tempFloat32);
+ }
+
+ static const size_t Id = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ const MSetPropertyCache* mir() const {
+ return mir_->toSetPropertyCache();
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* tempToUnboxIndex() {
+ return getTemp(1);
+ }
+ const LDefinition* tempDouble() {
+ return getTemp(2);
+ }
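+    // On platforms without unaliased double registers, the double temp also
+    // serves as the float32 temp.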
+ const LDefinition* tempFloat32() {
+ if (hasUnaliasedDouble())
+ return getTemp(3);
+ return getTemp(2);
+ }
+};
+
+class LCallIteratorStartV : public LCallInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CallIteratorStartV)
+
+ static const size_t Value = 0;
+
+ explicit LCallIteratorStartV(const LBoxAllocation& value) {
+ setBoxOperand(Value, value);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
+class LCallIteratorStartO : public LCallInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(CallIteratorStartO)
+
+ explicit LCallIteratorStartO(const LAllocation& object) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
+class LIteratorStartO : public LInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(IteratorStartO)
+
+ LIteratorStartO(const LAllocation& object, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, object);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ MIteratorStart* mir() const {
+ return mir_->toIteratorStart();
+ }
+};
+
+class LIteratorMore : public LInstructionHelper<BOX_PIECES, 1, 1>
+{
+ public:
+ LIR_HEADER(IteratorMore)
+
+ LIteratorMore(const LAllocation& iterator, const LDefinition& temp) {
+ setOperand(0, iterator);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ MIteratorMore* mir() const {
+ return mir_->toIteratorMore();
+ }
+};
+
+class LIsNoIterAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsNoIterAndBranch)
+
+ LIsNoIterAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse, const LBoxAllocation& input) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+class LIteratorEnd : public LInstructionHelper<0, 1, 3>
+{
+ public:
+ LIR_HEADER(IteratorEnd)
+
+ LIteratorEnd(const LAllocation& iterator, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3) {
+ setOperand(0, iterator);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+ const LDefinition* temp3() {
+ return getTemp(2);
+ }
+ MIteratorEnd* mir() const {
+ return mir_->toIteratorEnd();
+ }
+};
+
+// Read the number of actual arguments.
+class LArgumentsLength : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(ArgumentsLength)
+};
+
+// Load a value from the actual arguments.
+class LGetFrameArgument : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(GetFrameArgument)
+
+ explicit LGetFrameArgument(const LAllocation& index) {
+ setOperand(0, index);
+ }
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+};
+
+// Store a value to the actual arguments.
+class LSetFrameArgumentT : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(SetFrameArgumentT)
+
+ explicit LSetFrameArgumentT(const LAllocation& input) {
+ setOperand(0, input);
+ }
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+// Store a constant value to the actual arguments.
+class LSetFrameArgumentC : public LInstructionHelper<0, 0, 0>
+{
+ Value val_;
+
+ public:
+ LIR_HEADER(SetFrameArgumentC)
+
+ explicit LSetFrameArgumentC(const Value& val) {
+ val_ = val;
+ }
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+ const Value& val() const {
+ return val_;
+ }
+};
+
+// Store a boxed value to the actual arguments.
+class LSetFrameArgumentV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(SetFrameArgumentV)
+
+ explicit LSetFrameArgumentV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MSetFrameArgument* mir() const {
+ return mir_->toSetFrameArgument();
+ }
+};
+
+class LRunOncePrologue : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(RunOncePrologue)
+
+ MRunOncePrologue* mir() const {
+ return mir_->toRunOncePrologue();
+ }
+};
+
+// Create the rest parameter.
+class LRest : public LCallInstructionHelper<1, 1, 3>
+{
+ public:
+ LIR_HEADER(Rest)
+
+ LRest(const LAllocation& numActuals, const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ {
+ setOperand(0, numActuals);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* numActuals() {
+ return getOperand(0);
+ }
+ MRest* mir() const {
+ return mir_->toRest();
+ }
+};
+
+class LGuardReceiverPolymorphic : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardReceiverPolymorphic)
+
+ LGuardReceiverPolymorphic(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const MGuardReceiverPolymorphic* mir() const {
+ return mir_->toGuardReceiverPolymorphic();
+ }
+};
+
+class LGuardUnboxedExpando : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(GuardUnboxedExpando)
+
+ explicit LGuardUnboxedExpando(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MGuardUnboxedExpando* mir() const {
+ return mir_->toGuardUnboxedExpando();
+ }
+};
+
+class LLoadUnboxedExpando : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(LoadUnboxedExpando)
+
+ explicit LLoadUnboxedExpando(const LAllocation& in) {
+ setOperand(0, in);
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const MLoadUnboxedExpando* mir() const {
+ return mir_->toLoadUnboxedExpando();
+ }
+};
+
+// Guard that a value is in a TypeSet.
+class LTypeBarrierV : public LInstructionHelper<0, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(TypeBarrierV)
+
+ LTypeBarrierV(const LBoxAllocation& input, const LDefinition& temp) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MTypeBarrier* mir() const {
+ return mir_->toTypeBarrier();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard that an object is in a TypeSet.
+class LTypeBarrierO : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(TypeBarrierO)
+
+ LTypeBarrierO(const LAllocation& obj, const LDefinition& temp) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+ const MTypeBarrier* mir() const {
+ return mir_->toTypeBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard that a value is in a TypeSet.
+class LMonitorTypes : public LInstructionHelper<0, BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(MonitorTypes)
+
+ LMonitorTypes(const LBoxAllocation& input, const LDefinition& temp) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MMonitorTypes* mir() const {
+ return mir_->toMonitorTypes();
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing an object to another object.
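+// The barrier records stores that may create a tenured -> nursery edge so a
+// later minor GC can find and trace them.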
+class LPostWriteBarrierO : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(PostWriteBarrierO)
+
+ LPostWriteBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteBarrier* mir() const {
+ return mir_->toPostWriteBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing a value to another object.
+class LPostWriteBarrierV : public LInstructionHelper<0, 1 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PostWriteBarrierV)
+
+ LPostWriteBarrierV(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp) {
+ setOperand(0, obj);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 1;
+
+ const MPostWriteBarrier* mir() const {
+ return mir_->toPostWriteBarrier();
+ }
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing an object to another object's
+// elements.
+class LPostWriteElementBarrierO : public LInstructionHelper<0, 3, 1>
+{
+ public:
+ LIR_HEADER(PostWriteElementBarrierO)
+
+ LPostWriteElementBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LAllocation& index, const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setOperand(2, index);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ const LAllocation* index() {
+ return getOperand(2);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Generational write barrier used when writing a value to another object's
+// elements.
+class LPostWriteElementBarrierV : public LInstructionHelper<0, 2 + BOX_PIECES, 1>
+{
+ public:
+ LIR_HEADER(PostWriteElementBarrierV)
+
+ LPostWriteElementBarrierV(const LAllocation& obj, const LAllocation& index,
+ const LBoxAllocation& value, const LDefinition& temp) {
+ setOperand(0, obj);
+ setOperand(1, index);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 2;
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+
+ const LAllocation* index() {
+ return getOperand(1);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+};
+
+// Guard on an object's identity.
+class LGuardObjectIdentity : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(GuardObjectIdentity)
+
+ explicit LGuardObjectIdentity(const LAllocation& in, const LAllocation& expected) {
+ setOperand(0, in);
+ setOperand(1, expected);
+ }
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LAllocation* expected() {
+ return getOperand(1);
+ }
+ const MGuardObjectIdentity* mir() const {
+ return mir_->toGuardObjectIdentity();
+ }
+};
+
+// Guard on an object's class.
+class LGuardClass : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardClass)
+
+ LGuardClass(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardClass* mir() const {
+ return mir_->toGuardClass();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+// Guard on whether a TypedArray's memory is shared.
+class LGuardSharedTypedArray : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardSharedTypedArray)
+
+ LGuardSharedTypedArray(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardSharedTypedArray* mir() const {
+ return mir_->toGuardSharedTypedArray();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LIn : public LCallInstructionHelper<1, BOX_PIECES+1, 0>
+{
+ public:
+ LIR_HEADER(In)
+ LIn(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+ const LAllocation* rhs() {
+ return getOperand(RHS);
+ }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LInstanceOfO : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(InstanceOfO)
+ explicit LInstanceOfO(const LAllocation& lhs) {
+ setOperand(0, lhs);
+ }
+
+ MInstanceOf* mir() const {
+ return mir_->toInstanceOf();
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+};
+
+class LInstanceOfV : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(InstanceOfV)
+ explicit LInstanceOfV(const LBoxAllocation& lhs) {
+ setBoxOperand(LHS, lhs);
+ }
+
+ MInstanceOf* mir() const {
+ return mir_->toInstanceOf();
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+
+ static const size_t LHS = 0;
+};
+
+class LCallInstanceOf : public LCallInstructionHelper<1, BOX_PIECES+1, 0>
+{
+ public:
+ LIR_HEADER(CallInstanceOf)
+ LCallInstanceOf(const LBoxAllocation& lhs, const LAllocation& rhs) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LDefinition* output() {
+ return this->getDef(0);
+ }
+ const LAllocation* lhs() {
+ return getOperand(LHS);
+ }
+ const LAllocation* rhs() {
+ return getOperand(RHS);
+ }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LIsCallable : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsCallable);
+ explicit LIsCallable(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIsCallable* mir() const {
+ return mir_->toIsCallable();
+ }
+};
+
+class LIsConstructor : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(IsConstructor);
+ explicit LIsConstructor(const LAllocation& object) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() {
+ return getOperand(0);
+ }
+ MIsConstructor* mir() const {
+ return mir_->toIsConstructor();
+ }
+};
+
+class LIsObject : public LInstructionHelper<1, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsObject);
+ static const size_t Input = 0;
+
+ explicit LIsObject(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MIsObject* mir() const {
+ return mir_->toIsObject();
+ }
+};
+
+class LIsObjectAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(IsObjectAndBranch)
+
+ LIsObjectAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse, const LBoxAllocation& input) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const {
+ return getSuccessor(0);
+ }
+ MBasicBlock* ifFalse() const {
+ return getSuccessor(1);
+ }
+};
+
+class LHasClass : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(HasClass);
+ explicit LHasClass(const LAllocation& lhs) {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* lhs() {
+ return getOperand(0);
+ }
+ MHasClass* mir() const {
+ return mir_->toHasClass();
+ }
+};
+
+template<size_t Defs, size_t Ops>
+class LWasmSelectBase : public LInstructionHelper<Defs, Ops, 0>
+{
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+ public:
+
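+    // mir_ lives in a dependent base class, so it has to be reached through
+    // Base:: (or this->) for name lookup to find it at instantiation time.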
+ MWasmSelect* mir() const {
+ return Base::mir_->toWasmSelect();
+ }
+};
+
+class LWasmSelect : public LWasmSelectBase<1, 3>
+{
+ public:
+ LIR_HEADER(WasmSelect);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = 1;
+ static const size_t CondIndex = 2;
+
+ LWasmSelect(const LAllocation& trueExpr, const LAllocation& falseExpr,
+ const LAllocation& cond)
+ {
+ setOperand(TrueExprIndex, trueExpr);
+ setOperand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LAllocation* trueExpr() {
+ return getOperand(TrueExprIndex);
+ }
+ const LAllocation* falseExpr() {
+ return getOperand(FalseExprIndex);
+ }
+ const LAllocation* condExpr() {
+ return getOperand(CondIndex);
+ }
+};
+
+class LWasmSelectI64 : public LWasmSelectBase<INT64_PIECES, 2 * INT64_PIECES + 1>
+{
+ public:
+ LIR_HEADER(WasmSelectI64);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = INT64_PIECES;
+ static const size_t CondIndex = INT64_PIECES * 2;
+
+ LWasmSelectI64(const LInt64Allocation& trueExpr, const LInt64Allocation& falseExpr,
+ const LAllocation& cond)
+ {
+ setInt64Operand(TrueExprIndex, trueExpr);
+ setInt64Operand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LInt64Allocation trueExpr() {
+ return getInt64Operand(TrueExprIndex);
+ }
+ const LInt64Allocation falseExpr() {
+ return getInt64Operand(FalseExprIndex);
+ }
+ const LAllocation* condExpr() {
+ return getOperand(CondIndex);
+ }
+};
+
+class LWasmAddOffset : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmAddOffset);
+ explicit LWasmAddOffset(const LAllocation& base) {
+ setOperand(0, base);
+ }
+ MWasmAddOffset* mir() const {
+ return mir_->toWasmAddOffset();
+ }
+ const LAllocation* base() {
+ return getOperand(0);
+ }
+};
+
+class LWasmBoundsCheck : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmBoundsCheck);
+ explicit LWasmBoundsCheck(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MWasmBoundsCheck* mir() const {
+ return mir_->toWasmBoundsCheck();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+namespace details {
+
+// This is a base class for LWasmLoad/LWasmLoadI64.
+template<size_t Defs, size_t Temp>
+class LWasmLoadBase : public LInstructionHelper<Defs, 1, Temp>
+{
+ public:
+ typedef LInstructionHelper<Defs, 1, Temp> Base;
+ explicit LWasmLoadBase(const LAllocation& ptr) {
+ Base::setOperand(0, ptr);
+ }
+ MWasmLoad* mir() const {
+ return Base::mir_->toWasmLoad();
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(0);
+ }
+};
+
+} // namespace details
+
+class LWasmLoad : public details::LWasmLoadBase<1, 1>
+{
+ public:
+ explicit LWasmLoad(const LAllocation& ptr)
+ : LWasmLoadBase(ptr)
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
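+    // The temp defaults to a bogus temp; platform-specific lowering may
+    // replace it when a scratch copy of the pointer is needed.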
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+
+ LIR_HEADER(WasmLoad);
+};
+
+class LWasmLoadI64 : public details::LWasmLoadBase<INT64_PIECES, 1>
+{
+ public:
+ explicit LWasmLoadI64(const LAllocation& ptr)
+ : LWasmLoadBase(ptr)
+ {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+
+ LIR_HEADER(WasmLoadI64);
+};
+
+class LWasmStore : public LInstructionHelper<0, 2, 1>
+{
+ public:
+ LIR_HEADER(WasmStore);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmStore(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(PtrIndex, ptr);
+ setOperand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const {
+ return mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return getTemp(0);
+ }
+ const LAllocation* value() {
+ return getOperand(ValueIndex);
+ }
+};
+
+class LWasmStoreI64 : public LInstructionHelper<0, INT64_PIECES + 1, 1>
+{
+ public:
+ LIR_HEADER(WasmStoreI64);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmStoreI64(const LAllocation& ptr, const LInt64Allocation& value) {
+ setOperand(PtrIndex, ptr);
+ setInt64Operand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const {
+ return mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return getTemp(0);
+ }
+ const LInt64Allocation value() {
+ return getInt64Operand(ValueIndex);
+ }
+};
+
+class LAsmJSLoadHeap : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(AsmJSLoadHeap);
+ explicit LAsmJSLoadHeap(const LAllocation& ptr) {
+ setOperand(0, ptr);
+ }
+ MAsmJSLoadHeap* mir() const {
+ return mir_->toAsmJSLoadHeap();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+};
+
+class LAsmJSStoreHeap : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(AsmJSStoreHeap);
+ LAsmJSStoreHeap(const LAllocation& ptr, const LAllocation& value) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ }
+ MAsmJSStoreHeap* mir() const {
+ return mir_->toAsmJSStoreHeap();
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+};
+
+class LAsmJSCompareExchangeHeap : public LInstructionHelper<1, 3, 4>
+{
+ public:
+ LIR_HEADER(AsmJSCompareExchangeHeap);
+
+ LAsmJSCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue)
+ {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ LAsmJSCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* oldValue() {
+ return getOperand(1);
+ }
+ const LAllocation* newValue() {
+ return getOperand(2);
+ }
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+    // Temp that may be used on LL/SC platforms to extract/insert bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ MAsmJSCompareExchangeHeap* mir() const {
+ return mir_->toAsmJSCompareExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicExchangeHeap : public LInstructionHelper<1, 2, 4>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicExchangeHeap);
+
+ LAsmJSAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ LAsmJSAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+    // Temp that may be used on LL/SC platforms to extract/insert bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(1);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(3);
+ }
+
+ MAsmJSAtomicExchangeHeap* mir() const {
+ return mir_->toAsmJSAtomicExchangeHeap();
+ }
+};
+
+class LAsmJSAtomicBinopHeap : public LInstructionHelper<1, 2, 6>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicBinopHeap);
+
+ static const int32_t valueOp = 1;
+
+ LAsmJSAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ }
+ LAsmJSAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp, const LDefinition& flagTemp,
+ const LDefinition& valueTemp, const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ setTemp(3, valueTemp);
+ setTemp(4, offsetTemp);
+ setTemp(5, maskTemp);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 1);
+ return getOperand(1);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() {
+ return getTemp(1);
+ }
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(1, addrTemp);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(2);
+ }
+    // Temp that may be used on LL/SC platforms to extract/insert bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(4);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(5);
+ }
+
+ MAsmJSAtomicBinopHeap* mir() const {
+ return mir_->toAsmJSAtomicBinopHeap();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAsmJSAtomicBinopHeapForEffect : public LInstructionHelper<0, 2, 5>
+{
+ public:
+ LIR_HEADER(AsmJSAtomicBinopHeapForEffect);
+ LAsmJSAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ }
+ LAsmJSAtomicBinopHeapForEffect(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+ const LAllocation* ptr() {
+ return getOperand(0);
+ }
+ const LAllocation* value() {
+ return getOperand(1);
+ }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() {
+ return getTemp(0);
+ }
+ void setAddrTemp(const LDefinition& addrTemp) {
+ setTemp(0, addrTemp);
+ }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() {
+ return getTemp(1);
+ }
+    // Temp that may be used on LL/SC platforms to extract/insert bits of the word.
+ const LDefinition* valueTemp() {
+ return getTemp(2);
+ }
+ const LDefinition* offsetTemp() {
+ return getTemp(3);
+ }
+ const LDefinition* maskTemp() {
+ return getTemp(4);
+ }
+
+ MAsmJSAtomicBinopHeap* mir() const {
+ return mir_->toAsmJSAtomicBinopHeap();
+ }
+};
+
+class LWasmLoadGlobalVar : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmLoadGlobalVar);
+ MWasmLoadGlobalVar* mir() const {
+ return mir_->toWasmLoadGlobalVar();
+ }
+};
+
+class LWasmLoadGlobalVarI64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmLoadGlobalVarI64);
+ MWasmLoadGlobalVar* mir() const {
+ return mir_->toWasmLoadGlobalVar();
+ }
+};
+
+class LWasmStoreGlobalVar : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmStoreGlobalVar);
+ explicit LWasmStoreGlobalVar(const LAllocation& value) {
+ setOperand(0, value);
+ }
+ MWasmStoreGlobalVar* mir() const {
+ return mir_->toWasmStoreGlobalVar();
+ }
+ const LAllocation* value() {
+ return getOperand(0);
+ }
+};
+
+class LWasmStoreGlobalVarI64 : public LInstructionHelper<0, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WasmStoreGlobalVarI64);
+ explicit LWasmStoreGlobalVarI64(const LInt64Allocation& value) {
+ setInt64Operand(0, value);
+ }
+ MWasmStoreGlobalVar* mir() const {
+ return mir_->toWasmStoreGlobalVar();
+ }
+ static const uint32_t InputIndex = 0;
+
+ const LInt64Allocation value() {
+ return getInt64Operand(InputIndex);
+ }
+};
+
+class LWasmParameter : public LInstructionHelper<1, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmParameter);
+};
+
+class LWasmParameterI64 : public LInstructionHelper<INT64_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(WasmParameterI64);
+};
+
+class LWasmReturn : public LInstructionHelper<0, 2, 0>
+{
+ public:
+ LIR_HEADER(WasmReturn);
+};
+
+class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
+{
+ public:
+ LIR_HEADER(WasmReturnI64)
+
+ explicit LWasmReturnI64(const LInt64Allocation& input) {
+ setInt64Operand(0, input);
+ }
+};
+
+class LWasmReturnVoid : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmReturnVoid);
+};
+
+class LWasmStackArg : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmStackArg);
+ explicit LWasmStackArg(const LAllocation& arg) {
+ setOperand(0, arg);
+ }
+ MWasmStackArg* mir() const {
+ return mirRaw()->toWasmStackArg();
+ }
+ const LAllocation* arg() {
+ return getOperand(0);
+ }
+};
+
+class LWasmStackArgI64 : public LInstructionHelper<0, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(WasmStackArgI64);
+ explicit LWasmStackArgI64(const LInt64Allocation& arg) {
+ setInt64Operand(0, arg);
+ }
+ MWasmStackArg* mir() const {
+ return mirRaw()->toWasmStackArg();
+ }
+ const LInt64Allocation arg() {
+ return getInt64Operand(0);
+ }
+};
+
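+// Wasm calls take a variable number of operands, so this base class derives
+// from LInstruction directly and implements the operand/temp/successor
+// interface by hand rather than through LInstructionHelper.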
+class LWasmCallBase : public LInstruction
+{
+ LAllocation* operands_;
+ uint32_t numOperands_;
+
+ public:
+
+ LWasmCallBase(LAllocation* operands, uint32_t numOperands)
+ : operands_(operands),
+ numOperands_(numOperands)
+ {}
+
+ MWasmCall* mir() const {
+ return mir_->toWasmCall();
+ }
+
+ bool isCall() const override {
+ return true;
+ }
+ bool isCallPreserved(AnyRegister reg) const override {
+ // All MWasmCalls preserve the TLS register:
+        // - internal/indirect calls do so via the internal wasm ABI
+        // - import calls do so by explicitly saving/restoring at the callsite
+        // - builtin calls do so because the TLS reg is non-volatile
+ return !reg.isFloat() && reg.gpr() == WasmTlsReg;
+ }
+
+ // LInstruction interface
+ size_t numOperands() const override {
+ return numOperands_;
+ }
+ LAllocation* getOperand(size_t index) override {
+ MOZ_ASSERT(index < numOperands_);
+ return &operands_[index];
+ }
+ void setOperand(size_t index, const LAllocation& a) override {
+ MOZ_ASSERT(index < numOperands_);
+ operands_[index] = a;
+ }
+ size_t numTemps() const override {
+ return 0;
+ }
+ LDefinition* getTemp(size_t index) override {
+ MOZ_CRASH("no temps");
+ }
+ void setTemp(size_t index, const LDefinition& a) override {
+ MOZ_CRASH("no temps");
+ }
+ size_t numSuccessors() const override {
+ return 0;
+ }
+ MBasicBlock* getSuccessor(size_t i) const override {
+ MOZ_CRASH("no successors");
+ }
+ void setSuccessor(size_t i, MBasicBlock*) override {
+ MOZ_CRASH("no successors");
+ }
+};
+
+class LWasmCall : public LWasmCallBase
+{
+ LDefinition def_;
+
+ public:
+ LIR_HEADER(WasmCall);
+
+ LWasmCall(LAllocation* operands, uint32_t numOperands)
+ : LWasmCallBase(operands, numOperands),
+ def_(LDefinition::BogusTemp())
+ {}
+
+ // LInstruction interface
+ size_t numDefs() const {
+ return def_.isBogusTemp() ? 0 : 1;
+ }
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(numDefs() == 1);
+ MOZ_ASSERT(index == 0);
+ return &def_;
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index == 0);
+ def_ = def;
+ }
+};
+
+class LWasmCallI64 : public LWasmCallBase
+{
+ LDefinition defs_[INT64_PIECES];
+
+ public:
+ LIR_HEADER(WasmCallI64);
+
+ LWasmCallI64(LAllocation* operands, uint32_t numOperands)
+ : LWasmCallBase(operands, numOperands)
+ {
+ for (size_t i = 0; i < numDefs(); i++)
+ defs_[i] = LDefinition::BogusTemp();
+ }
+
+ // LInstruction interface
+ size_t numDefs() const {
+ return INT64_PIECES;
+ }
+ LDefinition* getDef(size_t index) {
+ MOZ_ASSERT(index < numDefs());
+ return &defs_[index];
+ }
+ void setDef(size_t index, const LDefinition& def) {
+ MOZ_ASSERT(index < numDefs());
+ defs_[index] = def;
+ }
+};
+
+class LAssertRangeI : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(AssertRangeI)
+
+ explicit LAssertRangeI(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeD : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(AssertRangeD)
+
+ LAssertRangeD(const LAllocation& input, const LDefinition& temp) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeF : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(AssertRangeF)
+ LAssertRangeF(const LAllocation& input, const LDefinition& temp, const LDefinition& temp2) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* temp2() {
+ return getTemp(1);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertRangeV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(AssertRangeV)
+
+ LAssertRangeV(const LBoxAllocation& input, const LDefinition& temp,
+ const LDefinition& floatTemp1, const LDefinition& floatTemp2)
+ {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ setTemp(1, floatTemp1);
+ setTemp(2, floatTemp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
+ const LDefinition* floatTemp1() {
+ return getTemp(1);
+ }
+ const LDefinition* floatTemp2() {
+ return getTemp(2);
+ }
+
+ MAssertRange* mir() {
+ return mir_->toAssertRange();
+ }
+ const Range* range() {
+ return mir()->assertedRange();
+ }
+};
+
+class LAssertResultT : public LInstructionHelper<0, 1, 0>
+{
+ public:
+ LIR_HEADER(AssertResultT)
+
+ explicit LAssertResultT(const LAllocation& input) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() {
+ return getOperand(0);
+ }
+};
+
+class LAssertResultV : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(AssertResultV)
+
+ static const size_t Input = 0;
+
+ explicit LAssertResultV(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+};
+
+class LRecompileCheck : public LInstructionHelper<0, 0, 1>
+{
+ public:
+ LIR_HEADER(RecompileCheck)
+
+ explicit LRecompileCheck(const LDefinition& scratch) {
+ setTemp(0, scratch);
+ }
+
+ const LDefinition* scratch() {
+ return getTemp(0);
+ }
+ MRecompileCheck* mir() {
+ return mir_->toRecompileCheck();
+ }
+};
+
+class LLexicalCheck : public LInstructionHelper<0, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(LexicalCheck)
+
+ explicit LLexicalCheck(const LBoxAllocation& input) {
+ setBoxOperand(Input, input);
+ }
+
+ MLexicalCheck* mir() {
+ return mir_->toLexicalCheck();
+ }
+
+ static const size_t Input = 0;
+};
+
+class LThrowRuntimeLexicalError : public LCallInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(ThrowRuntimeLexicalError)
+
+ MThrowRuntimeLexicalError* mir() {
+ return mir_->toThrowRuntimeLexicalError();
+ }
+};
+
+class LGlobalNameConflictsCheck : public LInstructionHelper<0, 0, 0>
+{
+ public:
+ LIR_HEADER(GlobalNameConflictsCheck)
+
+ MGlobalNameConflictsCheck* mir() {
+ return mir_->toGlobalNameConflictsCheck();
+ }
+};
+
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0>
+{
+ private:
+ const MemoryBarrierBits type_;
+
+ public:
+ LIR_HEADER(MemoryBarrier)
+
+ // The parameter 'type' is a bitwise 'or' of the barrier types needed,
+ // see AtomicOp.h.
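+    // For example, a full barrier would pass something like
+    // (MembarLoadLoad | MembarLoadStore | MembarStoreLoad | MembarStoreStore);
+    // the exact bit names are the ones defined in AtomicOp.h.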
+ explicit LMemoryBarrier(MemoryBarrierBits type) : type_(type)
+ {
+ MOZ_ASSERT((type_ & ~MembarAllbits) == MembarNobits);
+ }
+
+ MemoryBarrierBits type() const {
+ return type_;
+ }
+};
+
+class LDebugger : public LCallInstructionHelper<0, 0, 2>
+{
+ public:
+ LIR_HEADER(Debugger)
+
+ LDebugger(const LDefinition& temp1, const LDefinition& temp2) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+};
+
+class LNewTarget : public LInstructionHelper<BOX_PIECES, 0, 0>
+{
+ public:
+ LIR_HEADER(NewTarget)
+};
+
+class LArrowNewTarget : public LInstructionHelper<BOX_PIECES, 1, 0>
+{
+ public:
+ explicit LArrowNewTarget(const LAllocation& callee) {
+ setOperand(0, callee);
+ }
+
+ LIR_HEADER(ArrowNewTarget)
+
+ const LAllocation* callee() {
+ return getOperand(0);
+ }
+};
+
+// Math.random().
+#ifdef JS_PUNBOX64
+# define LRANDOM_NUM_TEMPS 3
+#else
+# define LRANDOM_NUM_TEMPS 5
+#endif
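+
+// The difference in temp counts is an artifact of register width: holding the
+// 64-bit pieces of the RNG state takes a register pair on 32-bit platforms,
+// presumably the reason two extra temps are reserved there.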
+
+class LRandom : public LInstructionHelper<1, 0, LRANDOM_NUM_TEMPS>
+{
+ public:
+ LIR_HEADER(Random)
+    LRandom(const LDefinition& temp0, const LDefinition& temp1,
+            const LDefinition& temp2
+#ifndef JS_PUNBOX64
+            , const LDefinition& temp3, const LDefinition& temp4
+#endif
+            )
+ {
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+#ifndef JS_PUNBOX64
+ setTemp(3, temp3);
+ setTemp(4, temp4);
+#endif
+ }
+ const LDefinition* temp0() {
+ return getTemp(0);
+ }
+ const LDefinition* temp1() {
+ return getTemp(1);
+ }
+    const LDefinition* temp2() {
+        return getTemp(2);
+    }
+#ifndef JS_PUNBOX64
+    const LDefinition* temp3() {
+        return getTemp(3);
+    }
+    const LDefinition* temp4() {
+        return getTemp(4);
+    }
+#endif
+
+ MRandom* mir() const {
+ return mir_->toRandom();
+ }
+};
+
+class LCheckReturn : public LCallInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckReturn)
+
+ LCheckReturn(const LBoxAllocation& retVal, const LBoxAllocation& thisVal) {
+ setBoxOperand(ReturnValue, retVal);
+ setBoxOperand(ThisValue, thisVal);
+ }
+
+ static const size_t ReturnValue = 0;
+ static const size_t ThisValue = BOX_PIECES;
+};
+
+class LCheckIsObj : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckIsObj)
+
+ static const size_t CheckValue = 0;
+
+ explicit LCheckIsObj(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+
+ MCheckIsObj* mir() const {
+ return mir_->toCheckIsObj();
+ }
+};
+
+class LCheckObjCoercible : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(CheckObjCoercible)
+
+ static const size_t CheckValue = 0;
+
+ explicit LCheckObjCoercible(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+class LDebugCheckSelfHosted : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
+{
+ public:
+ LIR_HEADER(DebugCheckSelfHosted)
+
+ static const size_t CheckValue = 0;
+
+ explicit LDebugCheckSelfHosted(const LBoxAllocation& value) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_LIR_shared_h */
diff --git a/js/src/jit/shared/LOpcodes-shared.h b/js/src/jit/shared/LOpcodes-shared.h
new file mode 100644
index 000000000..bb04553a6
--- /dev/null
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -0,0 +1,441 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LOpcodes_shared_h
+#define jit_shared_LOpcodes_shared_h
+
+#define LIR_COMMON_OPCODE_LIST(_) \
+ _(Unbox) \
+ _(Box) \
+ _(UnboxFloatingPoint) \
+ _(OsiPoint) \
+ _(MoveGroup) \
+ _(Integer) \
+ _(Integer64) \
+ _(Pointer) \
+ _(Double) \
+ _(Float32) \
+ _(SimdBox) \
+ _(SimdUnbox) \
+ _(SimdSplatX16) \
+ _(SimdSplatX8) \
+ _(SimdSplatX4) \
+ _(Simd128Int) \
+ _(Simd128Float) \
+ _(SimdAllTrue) \
+ _(SimdAnyTrue) \
+ _(SimdReinterpretCast) \
+ _(SimdExtractElementI) \
+ _(SimdExtractElementU2D) \
+ _(SimdExtractElementB) \
+ _(SimdExtractElementF) \
+ _(SimdInsertElementI) \
+ _(SimdInsertElementF) \
+ _(SimdGeneralShuffleI) \
+ _(SimdGeneralShuffleF) \
+ _(SimdSwizzleI) \
+ _(SimdSwizzleF) \
+ _(SimdShuffle) \
+ _(SimdShuffleX4) \
+ _(SimdUnaryArithIx16) \
+ _(SimdUnaryArithIx8) \
+ _(SimdUnaryArithIx4) \
+ _(SimdUnaryArithFx4) \
+ _(SimdBinaryCompIx16) \
+ _(SimdBinaryCompIx8) \
+ _(SimdBinaryCompIx4) \
+ _(SimdBinaryCompFx4) \
+ _(SimdBinaryArithIx16) \
+ _(SimdBinaryArithIx8) \
+ _(SimdBinaryArithIx4) \
+ _(SimdBinaryArithFx4) \
+ _(SimdBinarySaturating) \
+ _(SimdBinaryBitwise) \
+ _(SimdShift) \
+ _(SimdSelect) \
+ _(Value) \
+ _(CloneLiteral) \
+ _(Parameter) \
+ _(Callee) \
+ _(IsConstructing) \
+ _(TableSwitch) \
+ _(TableSwitchV) \
+ _(Goto) \
+ _(NewArray) \
+ _(NewArrayCopyOnWrite) \
+ _(NewArrayDynamicLength) \
+ _(NewTypedArray) \
+ _(NewTypedArrayDynamicLength) \
+ _(ArraySplice) \
+ _(NewObject) \
+ _(NewTypedObject) \
+ _(NewNamedLambdaObject) \
+ _(NewCallObject) \
+ _(NewSingletonCallObject) \
+ _(NewStringObject) \
+ _(NewDerivedTypedObject) \
+ _(InitElem) \
+ _(InitElemGetterSetter) \
+ _(MutateProto) \
+ _(InitProp) \
+ _(InitPropGetterSetter) \
+ _(CheckOverRecursed) \
+ _(DefVar) \
+ _(DefLexical) \
+ _(DefFun) \
+ _(CallKnown) \
+ _(CallGeneric) \
+ _(CallNative) \
+ _(ApplyArgsGeneric) \
+ _(ApplyArrayGeneric) \
+ _(Bail) \
+ _(Unreachable) \
+ _(EncodeSnapshot) \
+ _(GetDynamicName) \
+ _(CallDirectEval) \
+ _(StackArgT) \
+ _(StackArgV) \
+ _(CreateThis) \
+ _(CreateThisWithProto) \
+ _(CreateThisWithTemplate) \
+ _(CreateArgumentsObject) \
+ _(GetArgumentsObjectArg) \
+ _(SetArgumentsObjectArg) \
+ _(ReturnFromCtor) \
+ _(ComputeThis) \
+ _(BitNotI) \
+ _(BitNotV) \
+ _(BitOpI) \
+ _(BitOpI64) \
+ _(BitOpV) \
+ _(ShiftI) \
+ _(ShiftI64) \
+ _(SignExtend) \
+ _(UrshD) \
+ _(Return) \
+ _(Throw) \
+ _(Phi) \
+ _(TestIAndBranch) \
+ _(TestI64AndBranch) \
+ _(TestDAndBranch) \
+ _(TestFAndBranch) \
+ _(TestVAndBranch) \
+ _(TestOAndBranch) \
+ _(FunctionDispatch) \
+ _(ObjectGroupDispatch) \
+ _(Compare) \
+ _(CompareAndBranch) \
+ _(CompareI64) \
+ _(CompareI64AndBranch) \
+ _(CompareD) \
+ _(CompareDAndBranch) \
+ _(CompareF) \
+ _(CompareFAndBranch) \
+ _(CompareS) \
+ _(CompareStrictS) \
+ _(CompareB) \
+ _(CompareBAndBranch) \
+ _(CompareBitwise) \
+ _(CompareBitwiseAndBranch) \
+ _(CompareVM) \
+ _(BitAndAndBranch) \
+ _(IsNullOrLikeUndefinedV) \
+ _(IsNullOrLikeUndefinedT) \
+ _(IsNullOrLikeUndefinedAndBranchV)\
+ _(IsNullOrLikeUndefinedAndBranchT)\
+ _(MinMaxI) \
+ _(MinMaxD) \
+ _(MinMaxF) \
+ _(NegI) \
+ _(NegD) \
+ _(NegF) \
+ _(AbsI) \
+ _(AbsD) \
+ _(AbsF) \
+ _(ClzI) \
+ _(ClzI64) \
+ _(CtzI) \
+ _(CtzI64) \
+ _(PopcntI) \
+ _(PopcntI64) \
+ _(SqrtD) \
+ _(SqrtF) \
+ _(CopySignD) \
+ _(CopySignF) \
+ _(Atan2D) \
+ _(Hypot) \
+ _(PowI) \
+ _(PowD) \
+ _(PowHalfD) \
+ _(Random) \
+ _(MathFunctionD) \
+ _(MathFunctionF) \
+ _(NotI) \
+ _(NotI64) \
+ _(NotD) \
+ _(NotF) \
+ _(NotO) \
+ _(NotV) \
+ _(AddI) \
+ _(AddI64) \
+ _(SubI) \
+ _(SubI64) \
+ _(MulI) \
+ _(MulI64) \
+ _(MathD) \
+ _(MathF) \
+ _(DivI) \
+ _(DivPowTwoI) \
+ _(ModI) \
+ _(ModPowTwoI) \
+ _(ModD) \
+ _(BinaryV) \
+ _(Concat) \
+ _(CharCodeAt) \
+ _(FromCharCode) \
+ _(FromCodePoint) \
+ _(SinCos) \
+ _(StringSplit) \
+ _(Int32ToDouble) \
+ _(Float32ToDouble) \
+ _(DoubleToFloat32) \
+ _(Int32ToFloat32) \
+ _(ValueToDouble) \
+ _(ValueToInt32) \
+ _(ValueToFloat32) \
+ _(DoubleToInt32) \
+ _(Float32ToInt32) \
+ _(TruncateDToInt32) \
+ _(TruncateFToInt32) \
+ _(WrapInt64ToInt32) \
+ _(ExtendInt32ToInt64) \
+ _(BooleanToString) \
+ _(IntToString) \
+ _(DoubleToString) \
+ _(ValueToString) \
+ _(ValueToObjectOrNull) \
+ _(Int32x4ToFloat32x4) \
+ _(Float32x4ToInt32x4) \
+ _(Float32x4ToUint32x4) \
+ _(Start) \
+ _(NaNToZero) \
+ _(OsrEntry) \
+ _(OsrValue) \
+ _(OsrEnvironmentChain) \
+ _(OsrReturnValue) \
+ _(OsrArgumentsObject) \
+ _(RegExp) \
+ _(RegExpMatcher) \
+ _(RegExpSearcher) \
+ _(RegExpTester) \
+ _(RegExpPrototypeOptimizable) \
+ _(RegExpInstanceOptimizable) \
+ _(GetFirstDollarIndex) \
+ _(StringReplace) \
+ _(Substr) \
+ _(BinarySharedStub) \
+ _(UnarySharedStub) \
+ _(NullarySharedStub) \
+ _(Lambda) \
+ _(LambdaArrow) \
+ _(LambdaForSingleton) \
+ _(KeepAliveObject) \
+ _(Slots) \
+ _(Elements) \
+ _(ConvertElementsToDoubles) \
+ _(MaybeToDoubleElement) \
+ _(MaybeCopyElementsForWrite) \
+ _(LoadSlotV) \
+ _(LoadSlotT) \
+ _(StoreSlotV) \
+ _(StoreSlotT) \
+ _(GuardShape) \
+ _(GuardReceiverPolymorphic) \
+ _(GuardObjectGroup) \
+ _(GuardObjectIdentity) \
+ _(GuardClass) \
+ _(GuardUnboxedExpando) \
+ _(LoadUnboxedExpando) \
+ _(TypeBarrierV) \
+ _(TypeBarrierO) \
+ _(MonitorTypes) \
+ _(PostWriteBarrierO) \
+ _(PostWriteBarrierV) \
+ _(PostWriteElementBarrierO) \
+ _(PostWriteElementBarrierV) \
+ _(InitializedLength) \
+ _(SetInitializedLength) \
+ _(UnboxedArrayLength) \
+ _(UnboxedArrayInitializedLength) \
+ _(IncrementUnboxedArrayInitializedLength) \
+ _(SetUnboxedArrayInitializedLength) \
+ _(BoundsCheck) \
+ _(BoundsCheckRange) \
+ _(BoundsCheckLower) \
+ _(LoadElementV) \
+ _(LoadElementT) \
+ _(LoadElementHole) \
+ _(LoadUnboxedScalar) \
+ _(LoadUnboxedPointerV) \
+ _(LoadUnboxedPointerT) \
+ _(UnboxObjectOrNull) \
+ _(StoreElementV) \
+ _(StoreElementT) \
+ _(StoreUnboxedScalar) \
+ _(StoreUnboxedPointer) \
+ _(ConvertUnboxedObjectToNative) \
+ _(ArrayPopShiftV) \
+ _(ArrayPopShiftT) \
+ _(ArrayPushV) \
+ _(ArrayPushT) \
+ _(ArraySlice) \
+ _(ArrayJoin) \
+ _(StoreElementHoleV) \
+ _(StoreElementHoleT) \
+ _(FallibleStoreElementV) \
+ _(FallibleStoreElementT) \
+ _(LoadTypedArrayElementHole) \
+ _(LoadTypedArrayElementStatic) \
+ _(StoreTypedArrayElementHole) \
+ _(StoreTypedArrayElementStatic) \
+ _(AtomicIsLockFree) \
+ _(GuardSharedTypedArray) \
+ _(CompareExchangeTypedArrayElement) \
+ _(AtomicExchangeTypedArrayElement) \
+ _(AtomicTypedArrayElementBinop) \
+ _(AtomicTypedArrayElementBinopForEffect) \
+ _(EffectiveAddress) \
+ _(ClampIToUint8) \
+ _(ClampDToUint8) \
+ _(ClampVToUint8) \
+ _(LoadFixedSlotV) \
+ _(LoadFixedSlotT) \
+ _(LoadFixedSlotAndUnbox) \
+ _(StoreFixedSlotV) \
+ _(StoreFixedSlotT) \
+ _(FunctionEnvironment) \
+ _(GetPropertyCacheV) \
+ _(GetPropertyCacheT) \
+ _(GetPropertyPolymorphicV) \
+ _(GetPropertyPolymorphicT) \
+ _(BindNameCache) \
+ _(CallBindVar) \
+ _(CallGetProperty) \
+ _(GetNameCache) \
+ _(CallGetIntrinsicValue) \
+ _(CallGetElement) \
+ _(CallSetElement) \
+ _(CallInitElementArray) \
+ _(CallSetProperty) \
+ _(CallDeleteProperty) \
+ _(CallDeleteElement) \
+ _(SetPropertyCache) \
+ _(SetPropertyPolymorphicV) \
+ _(SetPropertyPolymorphicT) \
+ _(CallIteratorStartV) \
+ _(CallIteratorStartO) \
+ _(IteratorStartO) \
+ _(IteratorMore) \
+ _(IsNoIterAndBranch) \
+ _(IteratorEnd) \
+ _(ArrayLength) \
+ _(SetArrayLength) \
+ _(GetNextEntryForIterator) \
+ _(TypedArrayLength) \
+ _(TypedArrayElements) \
+ _(SetDisjointTypedElements) \
+ _(TypedObjectDescr) \
+ _(TypedObjectElements) \
+ _(SetTypedObjectOffset) \
+ _(StringLength) \
+ _(ArgumentsLength) \
+ _(GetFrameArgument) \
+ _(SetFrameArgumentT) \
+ _(SetFrameArgumentC) \
+ _(SetFrameArgumentV) \
+ _(RunOncePrologue) \
+ _(Rest) \
+ _(TypeOfV) \
+ _(ToAsync) \
+ _(ToIdV) \
+ _(Floor) \
+ _(FloorF) \
+ _(Ceil) \
+ _(CeilF) \
+ _(Round) \
+ _(RoundF) \
+ _(In) \
+ _(InArray) \
+ _(InstanceOfO) \
+ _(InstanceOfV) \
+ _(CallInstanceOf) \
+ _(InterruptCheck) \
+ _(Rotate) \
+ _(RotateI64) \
+ _(GetDOMProperty) \
+ _(GetDOMMemberV) \
+ _(GetDOMMemberT) \
+ _(SetDOMProperty) \
+ _(CallDOMNative) \
+ _(IsCallable) \
+ _(IsConstructor) \
+ _(IsObject) \
+ _(IsObjectAndBranch) \
+ _(HasClass) \
+ _(RecompileCheck) \
+ _(MemoryBarrier) \
+ _(AssertRangeI) \
+ _(AssertRangeD) \
+ _(AssertRangeF) \
+ _(AssertRangeV) \
+ _(AssertResultV) \
+ _(AssertResultT) \
+ _(LexicalCheck) \
+ _(ThrowRuntimeLexicalError) \
+ _(GlobalNameConflictsCheck) \
+ _(Debugger) \
+ _(NewTarget) \
+ _(ArrowNewTarget) \
+ _(CheckReturn) \
+ _(CheckIsObj) \
+ _(CheckObjCoercible) \
+ _(DebugCheckSelfHosted) \
+ _(AsmJSLoadHeap) \
+ _(AsmJSStoreHeap) \
+ _(AsmJSCompareExchangeHeap) \
+ _(AsmJSAtomicExchangeHeap) \
+ _(AsmJSAtomicBinopHeap) \
+ _(AsmJSAtomicBinopHeapForEffect)\
+ _(WasmTruncateToInt32) \
+ _(WasmTrap) \
+ _(WasmReinterpret) \
+ _(WasmReinterpretToI64) \
+ _(WasmReinterpretFromI64) \
+ _(WasmSelect) \
+ _(WasmSelectI64) \
+ _(WasmBoundsCheck) \
+ _(WasmAddOffset) \
+ _(WasmLoad) \
+ _(WasmLoadI64) \
+ _(WasmStore) \
+ _(WasmStoreI64) \
+ _(WasmLoadGlobalVar) \
+ _(WasmLoadGlobalVarI64) \
+ _(WasmStoreGlobalVar) \
+ _(WasmStoreGlobalVarI64) \
+ _(WasmParameter) \
+ _(WasmParameterI64) \
+ _(WasmReturn) \
+ _(WasmReturnI64) \
+ _(WasmReturnVoid) \
+ _(WasmStackArg) \
+ _(WasmStackArgI64) \
+ _(WasmCall) \
+ _(WasmCallI64) \
+ _(WasmUint32ToDouble) \
+ _(WasmUint32ToFloat32)
+
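+// A minimal sketch (an illustration, not code from this patch; the enum and
+// table names below are made up) of how an X-macro list like
+// LIR_COMMON_OPCODE_LIST is typically consumed, e.g. to stamp out an opcode
+// enum and a matching name table:
+//
+//   #define LIROP(op) LOp_##op,
+//   enum ExampleOpcode { LIR_COMMON_OPCODE_LIST(LIROP) LOp_Limit };
+//   #undef LIROP
+//
+//   #define LIROP(op) #op,
+//   static const char* const ExampleOpcodeNames[] = { LIR_COMMON_OPCODE_LIST(LIROP) };
+//   #undef LIROP
+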
+#endif /* jit_shared_LOpcodes_shared_h */
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
new file mode 100644
index 000000000..61f1d3302
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -0,0 +1,858 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_inl_h
+#define jit_shared_Lowering_shared_inl_h
+
+#include "jit/shared/Lowering-shared.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+void
+LIRGeneratorShared::emitAtUses(MInstruction* mir)
+{
+ MOZ_ASSERT(mir->canEmitAtUses());
+ mir->setEmittedAtUses();
+ mir->setVirtualRegister(0);
+}
+
+LUse
+LIRGeneratorShared::use(MDefinition* mir, LUse policy)
+{
+ // It is illegal to call use() on an instruction with two defs.
+#if BOX_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Value);
+#endif
+#if INT64_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Int64);
+#endif
+ ensureDefined(mir);
+ policy.setVirtualRegister(mir->virtualRegister());
+ return policy;
+}
+
+template <size_t X> void
+LIRGeneratorShared::define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ define(lir, mir, LDefinition(type, policy));
+}
+
+template <size_t X> void
+LIRGeneratorShared::define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ // Assign the definition and a virtual register. Then, propagate this
+ // virtual register to the MIR, so we can map MIR to LIR during lowering.
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
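+
+// A minimal sketch (illustrative only; visitFoo, MFoo and LFoo are made-up
+// names) of how define() is typically used from a lowering method:
+//
+//   void LIRGenerator::visitFoo(MFoo* ins) {
+//       LFoo* lir = new(alloc()) LFoo(useRegister(ins->input()));
+//       define(lir, ins);
+//   }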
+
+template <size_t X, size_t Y> void
+LIRGeneratorShared::defineFixed(LInstructionHelper<1, X, Y>* lir, MDefinition* mir, const LAllocation& output)
+{
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::FIXED);
+ def.setOutput(output);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output)
+{
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 64
+ LDefinition def(LDefinition::GENERAL, LDefinition::FIXED);
+ def.setOutput(output.value());
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+#else
+ LDefinition def0(LDefinition::GENERAL, LDefinition::FIXED);
+ def0.setOutput(output.low());
+ lir->setDef(0, def0);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+ getVirtualRegister();
+ LDefinition def1(LDefinition::GENERAL, LDefinition::FIXED);
+ def1.setOutput(output.high());
+ lir->setDef(1, def1);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir, uint32_t operand)
+{
+ // Note: Any other operand that is not the same as this operand should be
+ // marked as not being "atStart". The regalloc cannot handle those and can
+ // overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand)
+{
+ // Note: Any other operand that is not the same as this operand should be
+ // marked as not being "atStart". The regalloc cannot handle those and can
+ // overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ LDefinition def1(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ lir->setDef(0, def1);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+#if JS_BITS_PER_WORD == 32
+ getVirtualRegister();
+ LDefinition def2(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ lir->setDef(1, def2);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ lir->setDef(0, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, policy));
+ lir->setDef(1, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, policy));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy)
+{
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 32
+ lir->setDef(0, LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL, policy));
+ lir->setDef(1, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL, policy));
+ getVirtualRegister();
+#else
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorShared::defineSharedStubReturn(LInstruction* lir, MDefinition* mir)
+{
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isBinarySharedStub() || lir->isUnarySharedStub() || lir->isNullarySharedStub());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir)
+{
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ switch (mir->type()) {
+ case MIRType::Value:
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX, LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+ break;
+ case MIRType::Int64:
+#if defined(JS_NUNBOX32)
+ lir->setDef(INT64LOW_INDEX, LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.low)));
+ lir->setDef(INT64HIGH_INDEX, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.high)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, LGeneralReg(ReturnReg)));
+#endif
+ break;
+ case MIRType::Float32:
+ lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32, LFloatReg(ReturnFloat32Reg)));
+ break;
+ case MIRType::Double:
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128INT, LFloatReg(ReturnSimd128Reg)));
+ break;
+ case MIRType::Float32x4:
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128FLOAT, LFloatReg(ReturnSimd128Reg)));
+ break;
+ default:
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ MOZ_ASSERT(type != LDefinition::DOUBLE && type != LDefinition::FLOAT32);
+ lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
+ break;
+ }
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps> void
+LIRGeneratorShared::defineSinCos(LInstructionHelper<2, Ops, Temps> *lir, MDefinition *mir,
+ LDefinition::Policy policy)
+{
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE, LFloatReg(ReturnDoubleReg)));
+#if defined(JS_CODEGEN_ARM)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE,
+ LFloatReg(FloatRegister(FloatRegisters::d1, FloatRegister::Double))));
+#elif defined(JS_CODEGEN_ARM64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE,
+ LFloatReg(FloatRegister(FloatRegisters::d1, FloatRegisters::Double))));
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE, LFloatReg(f2)));
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ lir->setDef(1, LDefinition(vreg + VREG_INCREMENT, LDefinition::DOUBLE, LFloatReg(xmm1)));
+#else
+#error "Unsupported architecture for SinCos"
+#endif
+
+ getVirtualRegister();
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+// In LIR, we treat booleans and integers as the same low-level type (INTEGER).
+// When snapshotting, we recover the actual JS type from MIR. This function
+// checks that when making redefinitions, we don't accidentally coerce two
+// incompatible types.
+static inline bool
+IsCompatibleLIRCoercion(MIRType to, MIRType from)
+{
+ if (to == from)
+ return true;
+ if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
+ (from == MIRType::Int32 || from == MIRType::Boolean)) {
+ return true;
+ }
+ // SIMD types can be coerced with from*Bits operators.
+ if (IsSimdType(to) && IsSimdType(from))
+ return true;
+ return false;
+}
+
+// We can redefine the sin(x) and cos(x) functions to return the result of the sincos call.
+void
+LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func)
+{
+ MOZ_ASSERT(def->isMathFunction());
+ MOZ_ASSERT(def->type() == MIRType::Double && as->type() == MIRType::SinCosDouble);
+ MOZ_ASSERT(MMathFunction::Sin == func || MMathFunction::Cos == func);
+
+ ensureDefined(as);
+ MMathFunction *math = def->toMathFunction();
+
+ MOZ_ASSERT(math->function() == MMathFunction::Cos ||
+ math->function() == MMathFunction::Sin);
+
+    // The sincos call returns two values:
+    // - VREG: the sin component of the result;
+    // - VREG + VREG_INCREMENT: the cos component of the result.
+ if (math->function() == MMathFunction::Sin)
+ def->setVirtualRegister(as->virtualRegister());
+ else
+ def->setVirtualRegister(as->virtualRegister() + VREG_INCREMENT);
+}
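+
+// A minimal sketch of how the two sincos virtual registers are consumed; the
+// sinDef/cosDef/sincosDef names are illustrative only, not definitions from
+// this patch:
+//
+//   redefine(sinDef, sincosDef, MMathFunction::Sin); // aliases VREG
+//   redefine(cosDef, sincosDef, MMathFunction::Cos); // aliases VREG + VREG_INCREMENT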
+
+void
+LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as)
+{
+ MOZ_ASSERT(IsCompatibleLIRCoercion(def->type(), as->type()));
+
+    // Try to emit MIR marked as emitted-at-uses at, well, uses. For
+    // snapshotting reasons we only do this when the MIRTypes match, or when
+    // we are coercing between bool and int32 constants.
+ if (as->isEmittedAtUses() &&
+ (def->type() == as->type() ||
+ (as->isConstant() &&
+ (def->type() == MIRType::Int32 || def->type() == MIRType::Boolean) &&
+ (as->type() == MIRType::Int32 || as->type() == MIRType::Boolean))))
+ {
+ MInstruction* replacement;
+ if (def->type() != as->type()) {
+ if (as->type() == MIRType::Int32)
+ replacement = MConstant::New(alloc(), BooleanValue(as->toConstant()->toInt32()));
+ else
+ replacement = MConstant::New(alloc(), Int32Value(as->toConstant()->toBoolean()));
+ def->block()->insertBefore(def->toInstruction(), replacement);
+ emitAtUses(replacement->toInstruction());
+ } else {
+ replacement = as->toInstruction();
+ }
+ def->replaceAllUsesWith(replacement);
+ } else {
+ ensureDefined(as);
+ def->setVirtualRegister(as->virtualRegister());
+
+#ifdef DEBUG
+ if (JitOptions.runExtraChecks &&
+ def->resultTypeSet() && as->resultTypeSet() &&
+ !def->resultTypeSet()->equals(as->resultTypeSet()))
+ {
+ switch (def->type()) {
+ case MIRType::Object:
+ case MIRType::ObjectOrNull:
+ case MIRType::String:
+ case MIRType::Symbol: {
+ LAssertResultT* check = new(alloc()) LAssertResultT(useRegister(def));
+ add(check, def->toInstruction());
+ break;
+ }
+ case MIRType::Value: {
+ LAssertResultV* check = new(alloc()) LAssertResultV(useBox(def));
+ add(check, def->toInstruction());
+ break;
+ }
+ default:
+ break;
+ }
+ }
+#endif
+ }
+}
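+
+// A minimal usage sketch; the visitor and MIR opcode below are hypothetical
+// and only illustrate the intent: a lowering that is a bit-level no-op can
+// skip emitting LIR entirely and alias the input's virtual register.
+//
+//   void LIRGenerator::visitBoolToInt32(MBoolToInt32* ins) {
+//       redefine(ins, ins->input()); // Int32 <-> Boolean is a compatible coercion
+//   }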
+
+void
+LIRGeneratorShared::ensureDefined(MDefinition* mir)
+{
+ if (mir->isEmittedAtUses()) {
+ mir->toInstruction()->accept(this);
+ MOZ_ASSERT(mir->isLowered());
+ }
+}
+
+LUse
+LIRGeneratorShared::useRegister(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::REGISTER));
+}
+
+LUse
+LIRGeneratorShared::useRegisterAtStart(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::REGISTER, true));
+}
+
+LUse
+LIRGeneratorShared::use(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::ANY));
+}
+
+LUse
+LIRGeneratorShared::useAtStart(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::ANY, true));
+}
+
+LAllocation
+LIRGeneratorShared::useOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return use(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useOrConstantAtStart(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrConstantAtStart(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition* mir)
+{
+ if (mir->isConstant() && mir->toConstant()->isInt32(0))
+ return LAllocation();
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ if (mir->isConstant() && mir->type() != MIRType::Double && mir->type() != MIRType::Float32)
+ return LAllocation(mir->toConstant());
+ return useRegister(mir);
+}
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+LAllocation
+LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
+{
+ return useRegisterOrConstant(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorable(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorableAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useAny(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+#else
+LAllocation
+LIRGeneratorShared::useAnyOrConstant(MDefinition* mir)
+{
+ return useOrConstant(mir);
+}
+
+LAllocation
+LIRGeneratorShared::useAny(MDefinition* mir)
+{
+ return use(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorable(MDefinition* mir)
+{
+ return useRegisterOrConstant(mir);
+}
+LAllocation
+LIRGeneratorShared::useStorableAtStart(MDefinition* mir)
+{
+ return useRegisterOrConstantAtStart(mir);
+}
+
+#endif
+
+LAllocation
+LIRGeneratorShared::useKeepalive(MDefinition* mir)
+{
+ return use(mir, LUse(LUse::KEEPALIVE));
+}
+
+LAllocation
+LIRGeneratorShared::useKeepaliveOrConstant(MDefinition* mir)
+{
+ if (mir->isConstant())
+ return LAllocation(mir->toConstant());
+ return useKeepalive(mir);
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, Register reg)
+{
+ return use(mir, LUse(reg));
+}
+
+LUse
+LIRGeneratorShared::useFixedAtStart(MDefinition* mir, Register reg)
+{
+ return use(mir, LUse(reg, true));
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, FloatRegister reg)
+{
+ return use(mir, LUse(reg));
+}
+
+LUse
+LIRGeneratorShared::useFixed(MDefinition* mir, AnyRegister reg)
+{
+ return reg.isFloat() ? use(mir, LUse(reg.fpu())) : use(mir, LUse(reg.gpr()));
+}
+
+LUse
+LIRGeneratorShared::useFixedAtStart(MDefinition* mir, AnyRegister reg)
+{
+ return reg.isFloat() ? use(mir, LUse(reg.fpu(), true)) : use(mir, LUse(reg.gpr(), true));
+}
+
+LDefinition
+LIRGeneratorShared::temp(LDefinition::Type type, LDefinition::Policy policy)
+{
+ return LDefinition(getVirtualRegister(), type, policy);
+}
+
+LInt64Definition
+LIRGeneratorShared::tempInt64(LDefinition::Policy policy)
+{
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL, policy);
+ LDefinition low = temp(LDefinition::GENERAL, policy);
+ return LInt64Definition(high, low);
+#else
+ return LInt64Definition(temp(LDefinition::GENERAL, policy));
+#endif
+}
+
+LDefinition
+LIRGeneratorShared::tempFixed(Register reg)
+{
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg));
+ return t;
+}
+
+LDefinition
+LIRGeneratorShared::tempFloat32()
+{
+ return temp(LDefinition::FLOAT32);
+}
+
+LDefinition
+LIRGeneratorShared::tempDouble()
+{
+ return temp(LDefinition::DOUBLE);
+}
+
+LDefinition
+LIRGeneratorShared::tempCopy(MDefinition* input, uint32_t reusedInput)
+{
+ MOZ_ASSERT(input->virtualRegister());
+ LDefinition t = temp(LDefinition::TypeFrom(input->type()), LDefinition::MUST_REUSE_INPUT);
+ t.setReusedInput(reusedInput);
+ return t;
+}
+
+template <typename T> void
+LIRGeneratorShared::annotate(T* ins)
+{
+ ins->setId(lirGraph_.getInstructionId());
+}
+
+template <typename T> void
+LIRGeneratorShared::add(T* ins, MInstruction* mir)
+{
+ MOZ_ASSERT(!ins->isPhi());
+ current->add(ins);
+ if (mir) {
+ MOZ_ASSERT(current == mir->block()->lir());
+ ins->setMir(mir);
+ }
+ annotate(ins);
+}
+
+#ifdef JS_NUNBOX32
+// Returns the virtual register of a js::Value-defining instruction. This is
+// abstracted because MBox is a special value-returning instruction that
+// redefines its input payload if its input is not constant. Therefore, it is
+// illegal to request a box's payload by adding VREG_DATA_OFFSET to its raw id.
+static inline uint32_t
+VirtualRegisterOfPayload(MDefinition* mir)
+{
+ if (mir->isBox()) {
+ MDefinition* inner = mir->toBox()->getOperand(0);
+ if (!inner->isConstant() && inner->type() != MIRType::Double && inner->type() != MIRType::Float32)
+ return inner->virtualRegister();
+ }
+ if (mir->isTypeBarrier())
+ return VirtualRegisterOfPayload(mir->getOperand(0));
+ return mir->virtualRegister() + VREG_DATA_OFFSET;
+}
+
+// Note: always call ensureDefined before calling useType/usePayload,
+// so that emitted-at-use operands are handled correctly.
+LUse
+LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(mir->virtualRegister() + VREG_TYPE_OFFSET, policy);
+}
+
+LUse
+LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy);
+}
+
+LUse
+LIRGeneratorShared::usePayloadAtStart(MDefinition* mir, LUse::Policy policy)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy, true);
+}
+
+LUse
+LIRGeneratorShared::usePayloadInRegisterAtStart(MDefinition* mir)
+{
+ return usePayloadAtStart(mir, LUse::REGISTER);
+}
+
+void
+LIRGeneratorShared::fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir)
+{
+ ensureDefined(mir);
+ lir->getOperand(n)->toUse()->setVirtualRegister(mir->virtualRegister() + VREG_TYPE_OFFSET);
+ lir->getOperand(n + 1)->toUse()->setVirtualRegister(VirtualRegisterOfPayload(mir));
+}
+#endif
+
+LUse
+LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir, MIRType type)
+{
+ MOZ_ASSERT(type != MIRType::Value && type != MIRType::None);
+ MOZ_ASSERT(mir->type() == MIRType::Object || mir->type() == MIRType::Slots);
+
+#ifdef JS_PUNBOX64
+ // On x64, masm.loadUnboxedValue emits slightly less efficient code when
+ // the input and output use the same register and we're not loading an
+ // int32/bool/double, so we just call useRegister in this case.
+ if (type != MIRType::Int32 && type != MIRType::Boolean && type != MIRType::Double)
+ return useRegister(mir);
+#endif
+
+ return useRegisterAtStart(mir);
+}
+
+LBoxAllocation
+LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart),
+ LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
+#else
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart));
+#endif
+}
+
+LBoxAllocation
+LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant)
+{
+ if (mir->type() == MIRType::Value)
+ return useBox(mir);
+
+ if (useConstant && mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LBoxAllocation(LAllocation(mir->toConstant()));
+#endif
+ }
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(useRegister(mir), LAllocation());
+#else
+ return LBoxAllocation(useRegister(mir));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64(MDefinition* mir, LUse::Policy policy, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(vreg + INT64HIGH_INDEX, policy, useAtStart),
+ LUse(vreg + INT64LOW_INDEX, policy, useAtStart));
+#else
+ return LInt64Allocation(LUse(vreg, policy, useAtStart));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(regs.high, vreg + INT64HIGH_INDEX, useAtStart),
+ LUse(regs.low, vreg + INT64LOW_INDEX, useAtStart));
+#else
+ return LInt64Allocation(LUse(regs.reg, vreg, useAtStart));
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64(MDefinition* mir, bool useAtStart)
+{
+ // On 32-bit platforms, always load the value in registers.
+#if JS_BITS_PER_WORD == 32
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+#else
+ return useInt64(mir, LUse::ANY, useAtStart);
+#endif
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64AtStart(MDefinition* mir)
+{
+ return useInt64(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64Register(MDefinition* mir, bool useAtStart)
+{
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64OrConstant(MDefinition* mir, bool useAtStart)
+{
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64(mir, useAtStart);
+}
+
+LInt64Allocation
+LIRGeneratorShared::useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart)
+{
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64Register(mir, useAtStart);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_inl_h */
diff --git a/js/src/jit/shared/Lowering-shared.cpp b/js/src/jit/shared/Lowering-shared.cpp
new file mode 100644
index 000000000..4d313491d
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -0,0 +1,306 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+#include "jit/LIR.h"
+#include "jit/MIR.h"
+
+#include "vm/Symbol.h"
+
+using namespace js;
+using namespace jit;
+
+bool
+LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs, MInstruction* ins)
+{
+ // lhs and rhs are used by the commutative operator.
+ MOZ_ASSERT(lhs->hasDefUses());
+ MOZ_ASSERT(rhs->hasDefUses());
+
+ // Ensure that if there is a constant, then it is in rhs.
+ if (rhs->isConstant())
+ return false;
+ if (lhs->isConstant())
+ return true;
+
+ // Since clobbering binary operations clobber the left operand, prefer a
+ // non-constant lhs operand with no further uses. To be fully precise, we
+ // should check whether this is the *last* use, but checking hasOneDefUse()
+ // is a decent approximation which doesn't require any extra analysis.
+ bool rhsSingleUse = rhs->hasOneDefUse();
+ bool lhsSingleUse = lhs->hasOneDefUse();
+ if (rhsSingleUse) {
+ if (!lhsSingleUse)
+ return true;
+ } else {
+ if (lhsSingleUse)
+ return false;
+ }
+
+ // If this is a reduction-style computation, such as
+ //
+ // sum = 0;
+ // for (...)
+ // sum += ...;
+ //
+ // put the phi on the left to promote coalescing. This is fairly specific.
+ if (rhsSingleUse &&
+ rhs->isPhi() &&
+ rhs->block()->isLoopHeader() &&
+ ins == rhs->toPhi()->getLoopBackedgeOperand())
+ {
+ return true;
+ }
+
+ return false;
+}
+
+void
+LIRGeneratorShared::ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp, MInstruction* ins)
+{
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (ShouldReorderCommutative(lhs, rhs, ins)) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ }
+}
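+
+// A minimal sketch of the intended call pattern; the visitor name is
+// illustrative and the exact use policies are platform specific. Callers
+// reorder before building uses so that constants and single-use values end
+// up on the right-hand side.
+//
+//   void LIRGenerator::visitSomeCommutativeOp(MBinaryArithInstruction* ins) {
+//       MDefinition* lhs = ins->lhs();
+//       MDefinition* rhs = ins->rhs();
+//       ReorderCommutative(&lhs, &rhs, ins);
+//       // ... build the LIR with, e.g., useRegisterAtStart(lhs) and
+//       // useOrConstant(rhs) ...
+//   }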
+
+void
+LIRGeneratorShared::visitConstant(MConstant* ins)
+{
+ if (!IsFloatingPointType(ins->type()) && ins->canEmitAtUses()) {
+ emitAtUses(ins);
+ return;
+ }
+
+ switch (ins->type()) {
+ case MIRType::Double:
+ define(new(alloc()) LDouble(ins->toRawF64()), ins);
+ break;
+ case MIRType::Float32:
+ define(new(alloc()) LFloat32(ins->toRawF32()), ins);
+ break;
+ case MIRType::Boolean:
+ define(new(alloc()) LInteger(ins->toBoolean()), ins);
+ break;
+ case MIRType::Int32:
+ define(new(alloc()) LInteger(ins->toInt32()), ins);
+ break;
+ case MIRType::Int64:
+ defineInt64(new(alloc()) LInteger64(ins->toInt64()), ins);
+ break;
+ case MIRType::String:
+ define(new(alloc()) LPointer(ins->toString()), ins);
+ break;
+ case MIRType::Symbol:
+ define(new(alloc()) LPointer(ins->toSymbol()), ins);
+ break;
+ case MIRType::Object:
+ define(new(alloc()) LPointer(&ins->toObject()), ins);
+ break;
+ default:
+ // Constants of special types (undefined, null) should never flow into
+ // here directly. Operations blindly consuming them require a Box.
+ MOZ_CRASH("unexpected constant type");
+ }
+}
+
+void
+LIRGeneratorShared::defineTypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* lir = current->getPhi(lirIndex);
+
+ uint32_t vreg = getVirtualRegister();
+
+ phi->setVirtualRegister(vreg);
+ lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
+ annotate(lir);
+}
+
+void
+LIRGeneratorShared::lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* lir = block->getPhi(lirIndex);
+ lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
+}
+
+LRecoverInfo*
+LIRGeneratorShared::getRecoverInfo(MResumePoint* rp)
+{
+ if (cachedRecoverInfo_ && cachedRecoverInfo_->mir() == rp)
+ return cachedRecoverInfo_;
+
+ LRecoverInfo* recoverInfo = LRecoverInfo::New(gen, rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ cachedRecoverInfo_ = recoverInfo;
+ return recoverInfo;
+}
+
+#ifdef DEBUG
+bool
+LRecoverInfo::OperandIter::canOptimizeOutIfUnused()
+{
+ MDefinition* ins = **this;
+
+ // We check ins->type() in addition to ins->isUnused() because
+ // EliminateDeadResumePointOperands may replace nodes with the constant
+ // MagicValue(JS_OPTIMIZED_OUT).
+ if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
+ (*it_)->isResumePoint())
+ {
+ return !(*it_)->toResumePoint()->isObservableOperand(op_);
+ }
+
+ return true;
+}
+#endif
+
+#ifdef JS_NUNBOX32
+LSnapshot*
+LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind)
+{
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot)
+ return nullptr;
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* ins = *it;
+
+ if (ins->isRecoveredOnBailout())
+ continue;
+
+ LAllocation* type = snapshot->typeOfSlot(index);
+ LAllocation* payload = snapshot->payloadOfSlot(index);
+ ++index;
+
+ if (ins->isBox())
+ ins = ins->toBox()->getOperand(0);
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
+
+ // The register allocation will fill these fields in with actual
+ // register/stack assignments. During code generation, we can restore
+ // interpreter state with the given information. Note that for
+ // constants, including known types, we record a dummy placeholder,
+        // since we can recover the same information, much more cleanly, from MIR.
+ if (ins->isConstant() || ins->isUnused()) {
+ *type = LAllocation();
+ *payload = LAllocation();
+ } else if (ins->type() != MIRType::Value) {
+ *type = LAllocation();
+ *payload = use(ins, LUse(LUse::KEEPALIVE));
+ } else {
+ *type = useType(ins, LUse::KEEPALIVE);
+ *payload = usePayload(ins, LUse::KEEPALIVE);
+ }
+ }
+
+ return snapshot;
+}
+
+#elif JS_PUNBOX64
+
+LSnapshot*
+LIRGeneratorShared::buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind)
+{
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo)
+ return nullptr;
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot)
+ return nullptr;
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* def = *it;
+
+ if (def->isRecoveredOnBailout())
+ continue;
+
+ if (def->isBox())
+ def = def->toBox()->getOperand(0);
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
+
+ LAllocation* a = snapshot->getEntry(index++);
+
+ if (def->isUnused()) {
+ *a = LAllocation();
+ continue;
+ }
+
+ *a = useKeepaliveOrConstant(def);
+ }
+
+ return snapshot;
+}
+#endif
+
+void
+LIRGeneratorShared::assignSnapshot(LInstruction* ins, BailoutKind kind)
+{
+ // assignSnapshot must be called before define/add, since
+ // it may add new instructions for emitted-at-use operands.
+ MOZ_ASSERT(ins->id() == 0);
+
+ LSnapshot* snapshot = buildSnapshot(ins, lastResumePoint_, kind);
+ if (snapshot)
+ ins->assignSnapshot(snapshot);
+ else
+ gen->abort("buildSnapshot failed");
+}
+
+void
+LIRGeneratorShared::assignSafepoint(LInstruction* ins, MInstruction* mir, BailoutKind kind)
+{
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ MResumePoint* mrp = mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
+ LSnapshot* postSnapshot = buildSnapshot(ins, mrp, kind);
+ if (!postSnapshot) {
+ gen->abort("buildSnapshot failed");
+ return;
+ }
+
+ osiPoint_ = new(alloc()) LOsiPoint(ins->safepoint(), postSnapshot);
+
+ if (!lirGraph_.noteNeedsSafepoint(ins))
+ gen->abort("noteNeedsSafepoint failed");
+}
+
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
new file mode 100644
index 000000000..e73df780e
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -0,0 +1,296 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_h
+#define jit_shared_Lowering_shared_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+class MDefinition;
+class MInstruction;
+class LOsiPoint;
+
+class LIRGeneratorShared : public MDefinitionVisitor
+{
+ protected:
+ MIRGenerator* gen;
+ MIRGraph& graph;
+ LIRGraph& lirGraph_;
+ LBlock* current;
+ MResumePoint* lastResumePoint_;
+ LRecoverInfo* cachedRecoverInfo_;
+ LOsiPoint* osiPoint_;
+
+ public:
+ LIRGeneratorShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : gen(gen),
+ graph(graph),
+ lirGraph_(lirGraph),
+ lastResumePoint_(nullptr),
+ cachedRecoverInfo_(nullptr),
+ osiPoint_(nullptr)
+ { }
+
+ MIRGenerator* mir() {
+ return gen;
+ }
+
+ protected:
+
+ static void ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp, MInstruction* ins);
+ static bool ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs, MInstruction* ins);
+
+ // A backend can decide that an instruction should be emitted at its uses,
+ // rather than at its definition. To communicate this, set the
+    // instruction's virtual register to 0. When using the instruction,
+ // its virtual register is temporarily reassigned. To know to clear it
+ // after constructing the use information, the worklist bit is temporarily
+ // unset.
+ //
+ // The backend can use the worklist bit to determine whether or not a
+ // definition should be created.
+ inline void emitAtUses(MInstruction* mir);
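+
+    // For example, visitConstant() in Lowering-shared.cpp defers most
+    // non-floating-point constants with emitAtUses(); such deferred
+    // instructions are then lowered lazily by ensureDefined() the first time
+    // one of the use() helpers below needs their virtual register.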
+
+    // The lowest-level calls to use() (those that do not wrap another call to
+    // use()) must call ensureDefined() below before grabbing virtual register IDs.
+ inline void ensureDefined(MDefinition* mir);
+
+ // These all create a use of a virtual register, with an optional
+ // allocation policy.
+ //
+ // Some of these use functions have atStart variants.
+ // - non-atStart variants will tell the register allocator that the input
+ // allocation must be different from any Temp or Definition also needed for
+ // this LInstruction.
+ // - atStart variants relax that restriction and allow the input to be in
+ // the same register as any Temp or output Definition used by the
+ // LInstruction. Note that it doesn't *imply* this will actually happen,
+ // but gives a hint to the register allocator that it can do it.
+ //
+ // TL;DR: Use non-atStart variants only if you need the input value after
+ // writing to any temp or definitions, during code generation of this
+ // LInstruction. Otherwise, use atStart variants, which will lower register
+ // pressure.
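+    //
+    // As a sketch (exact policies vary by platform; lhs/rhs/ins stand for a
+    // MIR binary op and its operands), a two-address add that clobbers its
+    // left operand would typically be lowered as:
+    //
+    //   LAddI* lir = new(alloc()) LAddI;
+    //   lir->setOperand(0, useRegisterAtStart(lhs));
+    //   lir->setOperand(1, useRegisterOrConstant(rhs));
+    //   defineReuseInput(lir, ins, 0);
+    //
+    // Only the reused operand 0 is atStart here, matching the note on
+    // defineReuseInput.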
+ inline LUse use(MDefinition* mir, LUse policy);
+ inline LUse use(MDefinition* mir);
+ inline LUse useAtStart(MDefinition* mir);
+ inline LUse useRegister(MDefinition* mir);
+ inline LUse useRegisterAtStart(MDefinition* mir);
+ inline LUse useFixed(MDefinition* mir, Register reg);
+ inline LUse useFixed(MDefinition* mir, FloatRegister reg);
+ inline LUse useFixed(MDefinition* mir, AnyRegister reg);
+ inline LUse useFixedAtStart(MDefinition* mir, Register reg);
+ inline LUse useFixedAtStart(MDefinition* mir, AnyRegister reg);
+ inline LAllocation useOrConstant(MDefinition* mir);
+ inline LAllocation useOrConstantAtStart(MDefinition* mir);
+ // "Any" is architecture dependent, and will include registers and stack
+ // slots on X86, and only registers on ARM.
+ inline LAllocation useAny(MDefinition* mir);
+ inline LAllocation useAnyOrConstant(MDefinition* mir);
+ // "Storable" is architecture dependend, and will include registers and
+ // constants on X86 and only registers on ARM. This is a generic "things
+ // we can expect to write into memory in 1 instruction".
+ inline LAllocation useStorable(MDefinition* mir);
+ inline LAllocation useStorableAtStart(MDefinition* mir);
+ inline LAllocation useKeepalive(MDefinition* mir);
+ inline LAllocation useKeepaliveOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstantAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZeroAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ inline LUse useRegisterForTypedLoad(MDefinition* mir, MIRType type);
+
+#ifdef JS_NUNBOX32
+ inline LUse useType(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayload(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadAtStart(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadInRegisterAtStart(MDefinition* mir);
+
+    // Fills in a box input on an instruction, setting the virtual register of
+    // operand |n| to the type and of |n+1| to the payload. Does not modify the
+    // use policies, which are expected to already be set.
+ inline void fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir);
+#endif
+
+ // These create temporary register requests.
+ inline LDefinition temp(LDefinition::Type type = LDefinition::GENERAL,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LInt64Definition tempInt64(LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LDefinition tempFloat32();
+ inline LDefinition tempDouble();
+ inline LDefinition tempCopy(MDefinition* input, uint32_t reusedInput);
+
+ // Note that the fixed register has a GENERAL type.
+ inline LDefinition tempFixed(Register reg);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineFixed(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ const LAllocation& output);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineBox(LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineSinCos(LInstructionHelper<2, Ops, Temps> *lir, MDefinition *mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ inline void defineSharedStubReturn(LInstruction* lir, MDefinition* mir);
+ inline void defineReturn(LInstruction* lir, MDefinition* mir);
+
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64ReuseInput(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand);
+
+ // Returns a box allocation for a Value-typed instruction.
+ inline LBoxAllocation useBox(MDefinition* mir, LUse::Policy policy = LUse::REGISTER,
+ bool useAtStart = false);
+
+ // Returns a box allocation. The use is either typed, a Value, or
+ // a constant (if useConstant is true).
+ inline LBoxAllocation useBoxOrTypedOrConstant(MDefinition* mir, bool useConstant);
+
+ // Returns an int64 allocation for an Int64-typed instruction.
+ inline LInt64Allocation useInt64(MDefinition* mir, LUse::Policy policy, bool useAtStart);
+ inline LInt64Allocation useInt64(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64AtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64OrConstant(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64Register(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart = false);
+
+ LInt64Allocation useInt64RegisterAtStart(MDefinition* mir) {
+ return useInt64Register(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64RegisterOrConstantAtStart(MDefinition* mir) {
+ return useInt64RegisterOrConstant(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64OrConstantAtStart(MDefinition* mir) {
+ return useInt64OrConstant(mir, /* useAtStart = */ true);
+ }
+
+ // Rather than defining a new virtual register, sets |ins| to have the same
+ // virtual register as |as|.
+ inline void redefine(MDefinition* ins, MDefinition* as);
+
+ // Redefine a sin/cos call to sincos.
+ inline void redefine(MDefinition* def, MDefinition* as, MMathFunction::Function func);
+
+ TempAllocator& alloc() const {
+ return graph.alloc();
+ }
+
+ uint32_t getVirtualRegister() {
+ uint32_t vreg = lirGraph_.getVirtualRegister();
+
+ // If we run out of virtual registers, mark code generation as having
+ // failed and return a dummy vreg. Include a + 1 here for NUNBOX32
+ // platforms that expect Value vregs to be adjacent.
+ if (vreg + 1 >= MAX_VIRTUAL_REGISTERS) {
+ gen->abort("max virtual registers");
+ return 1;
+ }
+ return vreg;
+ }
+
+ template <typename T> void annotate(T* ins);
+ template <typename T> void add(T* ins, MInstruction* mir = nullptr);
+
+ void lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineTypedPhi(MPhi* phi, size_t lirIndex);
+
+ LOsiPoint* popOsiPoint() {
+ LOsiPoint* tmp = osiPoint_;
+ osiPoint_ = nullptr;
+ return tmp;
+ }
+
+ LRecoverInfo* getRecoverInfo(MResumePoint* rp);
+ LSnapshot* buildSnapshot(LInstruction* ins, MResumePoint* rp, BailoutKind kind);
+ bool assignPostSnapshot(MInstruction* mir, LInstruction* ins);
+
+ // Marks this instruction as fallible, meaning that before it performs
+ // effects (if any), it may check pre-conditions and bailout if they do not
+ // hold. This function informs the register allocator that it will need to
+ // capture appropriate state.
+ void assignSnapshot(LInstruction* ins, BailoutKind kind);
+
+ // Marks this instruction as needing to call into either the VM or GC. This
+ // function may build a snapshot that captures the result of its own
+ // instruction, and as such, should generally be called after define*().
+ void assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind = Bailout_DuringVMCall);
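+
+    // As a sketch of the intended ordering (instruction names illustrative):
+    //
+    //   // Fallible, non-call instruction: snapshot goes in before define/add.
+    //   assignSnapshot(guard, Bailout_ShapeGuard);
+    //   add(guard, ins);
+    //
+    //   // VM-calling instruction: safepoint goes in after define*.
+    //   defineReturn(call, ins);
+    //   assignSafepoint(call, ins);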
+
+ public:
+ void lowerConstantDouble(double d, MInstruction* mir) {
+ define(new(alloc()) LDouble(wasm::RawF64(d)), mir);
+ }
+ void lowerConstantFloat32(float f, MInstruction* mir) {
+ define(new(alloc()) LFloat32(wasm::RawF32(f)), mir);
+ }
+
+ void visitConstant(MConstant* ins) override;
+
+ // Whether to generate typed reads for element accesses with hole checks.
+ static bool allowTypedElementHoleCheck() {
+ return false;
+ }
+
+ // Whether to generate typed array accesses on statically known objects.
+ static bool allowStaticTypedArrayAccesses() {
+ return false;
+ }
+
+ // Provide NYI default implementations of the SIMD visitor functions.
+ // Many targets don't implement SIMD at all, and we don't want to duplicate
+ // these stubs in the specific sub-classes.
+ // Some SIMD visitors are implemented in LIRGenerator in Lowering.cpp. These
+ // shared implementations are not included here.
+ void visitSimdInsertElement(MSimdInsertElement*) override { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElement(MSimdExtractElement*) override { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArith(MSimdBinaryArith*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSelect(MSimdSelect*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSplat(MSimdSplat*) override { MOZ_CRASH("NYI"); }
+ void visitSimdValueX4(MSimdValueX4*) override { MOZ_CRASH("NYI"); }
+ void visitSimdBinarySaturating(MSimdBinarySaturating*) override { MOZ_CRASH("NYI"); }
+ void visitSimdSwizzle(MSimdSwizzle*) override { MOZ_CRASH("NYI"); }
+ void visitSimdShuffle(MSimdShuffle*) override { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffle(MSimdGeneralShuffle*) override { MOZ_CRASH("NYI"); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_h */