author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/jit/mips-shared
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/mips-shared')
-rw-r--r--  js/src/jit/mips-shared/Architecture-mips-shared.cpp  77
-rw-r--r--  js/src/jit/mips-shared/Architecture-mips-shared.h  338
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.cpp  1746
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.h  1522
-rw-r--r--  js/src/jit/mips-shared/AtomicOperations-mips-shared.h  241
-rw-r--r--  js/src/jit/mips-shared/Bailouts-mips-shared.cpp  24
-rw-r--r--  js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp  16
-rw-r--r--  js/src/jit/mips-shared/BaselineCompiler-mips-shared.h  24
-rw-r--r--  js/src/jit/mips-shared/BaselineIC-mips-shared.cpp  39
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp  2931
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.h  301
-rw-r--r--  js/src/jit/mips-shared/LIR-mips-shared.h  408
-rw-r--r--  js/src/jit/mips-shared/Lowering-mips-shared.cpp  753
-rw-r--r--  js/src/jit/mips-shared/Lowering-mips-shared.h  108
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h  1030
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp  1728
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.h  262
-rw-r--r--  js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp  223
-rw-r--r--  js/src/jit/mips-shared/MoveEmitter-mips-shared.h  76
-rw-r--r--  js/src/jit/mips-shared/SharedICHelpers-mips-shared.h  382
20 files changed, 12229 insertions(+), 0 deletions(-)
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.cpp b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
new file mode 100644
index 000000000..4fcf2c90e
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "jit/RegisterSets.h"
+
+#define HWCAP_MIPS (1 << 28)
+#define HWCAP_LOONGSON (1 << 27)
+#define HWCAP_FPU (1 << 0)
+
+namespace js {
+namespace jit {
+
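+// Runtime CPU feature detection. Under the MIPS simulators we always
+// report an FPU; on real Linux hardware we scan /proc/cpuinfo once for
+// "FPU" and "Loongson" and cache the result in mips_private::Flags below.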
+static uint32_t
+get_mips_flags()
+{
+ uint32_t flags = HWCAP_MIPS;
+
+#if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ flags |= HWCAP_FPU;
+#else
+# ifdef __linux__
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (!fp)
+ return flags;
+
+ char buf[1024];
+ memset(buf, 0, sizeof(buf));
+ fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ if (strstr(buf, "FPU"))
+ flags |= HWCAP_FPU;
+ if (strstr(buf, "Loongson"))
+ flags |= HWCAP_LOONGSON;
+# endif
+#endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
+ return flags;
+}
+
+static bool check_fpu()
+{
+ return mips_private::Flags & HWCAP_FPU;
+}
+
+static bool check_loongson()
+{
+ return mips_private::Flags & HWCAP_LOONGSON;
+}
+
+namespace mips_private {
+ // Cache a local copy so we only have to read /proc/cpuinfo once.
+ uint32_t Flags = get_mips_flags();
+ bool hasFPU = check_fpu();
+ bool isLoongson = check_loongson();
+}
+
+Registers::Code
+Registers::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+} // namespace jit
+} // namespace js
+
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.h b/js/src/jit/mips-shared/Architecture-mips-shared.h
new file mode 100644
index 000000000..7afe30594
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Architecture_mips_shared_h
+#define jit_mips_shared_Architecture_mips_shared_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "js/Utility.h"
+
+// gcc appears to use _mips_hard_float to denote
+// that the target is a hard-float target.
+#ifdef _mips_hard_float
+#define JS_CODEGEN_MIPS_HARDFP
+#endif
+
+#if (defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32)) || defined(JS_SIMULATOR_MIPS32)
+#define USES_O32_ABI
+#elif (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64)) || defined(JS_SIMULATOR_MIPS64)
+#define USES_N64_ABI
+#else
+#error "Unsupported ABI"
+#endif
+
+namespace js {
+namespace jit {
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
+static const uint32_t JumpImmediateRange = UINT32_MAX;
+
+class Registers
+{
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ at = r1,
+ v0 = r2,
+ v1 = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
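+ // r8-r15 are named differently by the two ABIs: O32 calls them t0-t7,
+ // while N64 uses r8-r11 as extra argument registers (a4-a7) and r12-r15
+ // as t0-t3. The ta0-ta3 aliases give ABI-neutral names for the last
+ // four of these temporaries.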
+#if defined(USES_O32_ABI)
+ t0 = r8,
+ t1 = r9,
+ t2 = r10,
+ t3 = r11,
+ t4 = r12,
+ t5 = r13,
+ t6 = r14,
+ t7 = r15,
+ ta0 = t4,
+ ta1 = t5,
+ ta2 = t6,
+ ta3 = t7,
+#elif defined(USES_N64_ABI)
+ a4 = r8,
+ a5 = r9,
+ a6 = r10,
+ a7 = r11,
+ t0 = r12,
+ t1 = r13,
+ t2 = r14,
+ t3 = r15,
+ ta0 = a4,
+ ta1 = a5,
+ ta2 = a6,
+ ta3 = a7,
+#endif
+ s0 = r16,
+ s1 = r17,
+ s2 = r18,
+ s3 = r19,
+ s4 = r20,
+ s5 = r21,
+ s6 = r22,
+ s7 = r23,
+ t8 = r24,
+ t9 = r25,
+ k0 = r26,
+ k1 = r27,
+ gp = r28,
+ sp = r29,
+ fp = r30,
+ ra = r31,
+ invalid_reg
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static const char * const RegNames[];
+ static const char* GetName(Code code) {
+ MOZ_ASSERT(code < Total);
+ return RegNames[code];
+ }
+ static const char* GetName(Encoding i) {
+ return GetName(Code(i));
+ }
+
+ static Code FromName(const char* name);
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ static const uint32_t Total = 32;
+ static const uint32_t Allocatable;
+
+ typedef uint32_t SetType;
+ static const SetType AllMask = 0xffffffff;
+ static const SetType SharedArgRegMask = (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3);
+ static const SetType ArgRegMask;
+
+ static const SetType VolatileMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1) |
+ (1 << Registers::a0) |
+ (1 << Registers::a1) |
+ (1 << Registers::a2) |
+ (1 << Registers::a3) |
+ (1 << Registers::t0) |
+ (1 << Registers::t1) |
+ (1 << Registers::t2) |
+ (1 << Registers::t3) |
+ (1 << Registers::ta0) |
+ (1 << Registers::ta1) |
+ (1 << Registers::ta2) |
+ (1 << Registers::ta3);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::s0) |
+ (1 << Registers::s1) |
+ (1 << Registers::s2) |
+ (1 << Registers::s3) |
+ (1 << Registers::s4) |
+ (1 << Registers::s5) |
+ (1 << Registers::s6) |
+ (1 << Registers::s7) |
+ (1 << Registers::ra);
+
+ static const SetType WrapperMask =
+ VolatileMask | // = arguments
+ (1 << Registers::t0) | // = outReg
+ (1 << Registers::t1); // = argBase
+
+ static const SetType NonAllocatableMask =
+ (1 << Registers::zero) |
+ (1 << Registers::at) | // at = scratch
+ (1 << Registers::t8) | // t8 = scratch
+ (1 << Registers::t9) | // t9 = scratch
+ (1 << Registers::k0) |
+ (1 << Registers::k1) |
+ (1 << Registers::gp) |
+ (1 << Registers::sp) |
+ (1 << Registers::fp) |
+ (1 << Registers::ra);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask;
+
+ // Registers returned from a JS -> C call.
+ static const SetType SharedCallMask = (1 << Registers::v0);
+ static const SetType CallMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+class FloatRegistersMIPSShared
+{
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ invalid_freg
+ };
+ typedef FPRegisterID Code;
+ typedef FPRegisterID Encoding;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ double d;
+ };
+
+ static const char* GetName(Code code) {
+ static const char * const Names[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13",
+ "f14", "f15", "f16", "f17", "f18", "f19",
+ "f20", "f21", "f22", "f23", "f24", "f25",
+ "f26", "f27", "f28", "f29", "f30", "f31"};
+ return Names[code];
+ }
+
+ static const Code Invalid = invalid_freg;
+
+ typedef uint64_t SetType;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisterMIPSShared
+{
+ public:
+ bool isSimd128() const { return false; }
+
+ typedef FloatRegistersMIPSShared::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+};
+
+namespace mips_private {
+ extern uint32_t Flags;
+ extern bool hasFPU;
+ extern bool isLoongson;
+}
+
+inline uint32_t GetMIPSFlags() { return mips_private::Flags; }
+inline bool hasFPU() { return mips_private::hasFPU; }
+inline bool isLoongson() { return mips_private::isLoongson; }
+
+// MIPS doesn't have double registers that can NOT be treated as float32.
+inline bool
+hasUnaliasedDouble() {
+ return false;
+}
+
+// On MIPS, fn-double aliases both fn-float32 and fn+1-float32, so if you need
+// to convert a float32 to a double as a temporary, you need a temporary
+// double register.
+inline bool
+hasMultiAlias() {
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Architecture_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
new file mode 100644
index 000000000..f813eb946
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -0,0 +1,1746 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscompartment.h"
+#include "jsutil.h"
+
+#include "gc/Marking.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/JitCompartment.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Encode a standard register when it is used as rd, rs, or the extra
+// register (rt). These encoders should never be called with an InvalidReg.
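+// Field layout of a MIPS R-type instruction, for reference:
+//   opcode[31:26] rs[25:21] rt[20:16] rd[15:11] sa[10:6] funct[5:0]
+// Each helper below shifts a register code into the named field.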
+uint32_t
+js::jit::RS(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RSShift;
+}
+
+uint32_t
+js::jit::RT(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RTShift;
+}
+
+uint32_t
+js::jit::RD(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RDShift;
+}
+
+uint32_t
+js::jit::RZ(Register r)
+{
+ MOZ_ASSERT((r.code() & ~RegMask) == 0);
+ return r.code() << RZShift;
+}
+
+uint32_t
+js::jit::SA(uint32_t value)
+{
+ MOZ_ASSERT(value < 32);
+ return value << SAShift;
+}
+
+Register
+js::jit::toRS(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RSMask ) >> RSShift);
+}
+
+Register
+js::jit::toRT(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RTMask ) >> RTShift);
+}
+
+Register
+js::jit::toRD(Instruction& i)
+{
+ return Register::FromCode((i.encode() & RDMask ) >> RDShift);
+}
+
+Register
+js::jit::toR(Instruction& i)
+{
+ return Register::FromCode(i.encode() & RegMask);
+}
+
+void
+InstImm::extractImm16(BOffImm16* dest)
+{
+ *dest = BOffImm16(*this);
+}
+
+void
+AssemblerMIPSShared::finish()
+{
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool
+AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
+{
+ if (!AssemblerShared::asmMergeWith(size(), other))
+ return false;
+ for (size_t i = 0; i < other.numLongJumps(); i++) {
+ size_t off = other.longJumps_[i];
+ addLongJump(BufferOffset(size() + off));
+ }
+ return m_buffer.appendBuffer(other.m_buffer);
+}
+
+uint32_t
+AssemblerMIPSShared::actualIndex(uint32_t idx_) const
+{
+ return idx_;
+}
+
+uint8_t*
+AssemblerMIPSShared::PatchableJumpAddress(JitCode* code, uint32_t pe_)
+{
+ return code->raw() + pe_;
+}
+
+void
+AssemblerMIPSShared::copyJumpRelocationTable(uint8_t* dest)
+{
+ if (jumpRelocations_.length())
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+}
+
+void
+AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest)
+{
+ if (dataRelocations_.length())
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+}
+
+void
+AssemblerMIPSShared::copyPreBarrierTable(uint8_t* dest)
+{
+ if (preBarriers_.length())
+ memcpy(dest, preBarriers_.buffer(), preBarriers_.length());
+}
+
+void
+AssemblerMIPSShared::processCodeLabels(uint8_t* rawCode)
+{
+ for (size_t i = 0; i < codeLabels_.length(); i++) {
+ CodeLabel label = codeLabels_[i];
+ Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
+ }
+}
+
+AssemblerMIPSShared::Condition
+AssemblerMIPSShared::InvertCondition(Condition cond)
+{
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerMIPSShared::DoubleCondition
+AssemblerMIPSShared::InvertCondition(DoubleCondition cond)
+{
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+BOffImm16::BOffImm16(InstImm inst)
+ : data(inst.encode() & Imm16Mask)
+{
+}
+
+Instruction*
+BOffImm16::getDest(Instruction* src) const
+{
+ return &src[(((int32_t)data << 16) >> 16) + 1];
+}
+
+bool
+AssemblerMIPSShared::oom() const
+{
+ return AssemblerShared::oom() ||
+ m_buffer.oom() ||
+ jumpRelocations_.oom() ||
+ dataRelocations_.oom() ||
+ preBarriers_.oom();
+}
+
+// Size of the instruction stream, in bytes.
+size_t
+AssemblerMIPSShared::size() const
+{
+ return m_buffer.size();
+}
+
+// Size of the relocation table, in bytes.
+size_t
+AssemblerMIPSShared::jumpRelocationTableBytes() const
+{
+ return jumpRelocations_.length();
+}
+
+size_t
+AssemblerMIPSShared::dataRelocationTableBytes() const
+{
+ return dataRelocations_.length();
+}
+
+size_t
+AssemblerMIPSShared::preBarrierTableBytes() const
+{
+ return preBarriers_.length();
+}
+
+// Size of the data table, in bytes.
+size_t
+AssemblerMIPSShared::bytesNeeded() const
+{
+ return size() +
+ jumpRelocationTableBytes() +
+ dataRelocationTableBytes() +
+ preBarrierTableBytes();
+}
+
+// write a blob of binary into the instruction stream
+BufferOffset
+AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest)
+{
+ if (dest == nullptr)
+ return m_buffer.putInt(x);
+
+ WriteInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void
+AssemblerMIPSShared::WriteInstStatic(uint32_t x, uint32_t* dest)
+{
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset
+AssemblerMIPSShared::haltingAlign(int alignment)
+{
+ // TODO: Implement a proper halting align.
+ return nopAlign(alignment);
+}
+
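+// Emit nops until the buffer offset is a multiple of |alignment|, and
+// return the offset of the first nop emitted (if any).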
+BufferOffset
+AssemblerMIPSShared::nopAlign(int alignment)
+{
+ BufferOffset ret;
+ MOZ_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ } else {
+ MOZ_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned())
+ ret = tmp;
+ }
+ }
+ return ret;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_nop()
+{
+ return writeInst(op_special | ff_sll);
+}
+
+// Logical operations.
+BufferOffset
+AssemblerMIPSShared::as_and(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_or(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_xor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_nor(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_andi(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ori(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_xori(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lui(Register rd, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
+}
+
+// Branch and jump instructions
+BufferOffset
+AssemblerMIPSShared::as_bal(BOffImm16 off)
+{
+ BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
+ return bo;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_b(BOffImm16 off)
+{
+ BufferOffset bo = writeInst(InstImm(op_beq, zero, zero, off).encode());
+ return bo;
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(JumpOrCall jumpOrCall)
+{
+ if (jumpOrCall == BranchIsCall)
+ return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+
+ return InstImm(op_beq, zero, zero, BOffImm16(0));
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(Register s, Register t, Condition c)
+{
+ MOZ_ASSERT(c == AssemblerMIPSShared::Equal || c == AssemblerMIPSShared::NotEqual);
+ return InstImm(c == AssemblerMIPSShared::Equal ? op_beq : op_bne, s, t, BOffImm16(0));
+}
+
+InstImm
+AssemblerMIPSShared::getBranchCode(Register s, Condition c)
+{
+ switch (c) {
+ case AssemblerMIPSShared::Equal:
+ case AssemblerMIPSShared::Zero:
+ case AssemblerMIPSShared::BelowOrEqual:
+ return InstImm(op_beq, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::NotEqual:
+ case AssemblerMIPSShared::NonZero:
+ case AssemblerMIPSShared::Above:
+ return InstImm(op_bne, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThan:
+ return InstImm(op_bgtz, s, zero, BOffImm16(0));
+ case AssemblerMIPSShared::GreaterThanOrEqual:
+ case AssemblerMIPSShared::NotSigned:
+ return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
+ case AssemblerMIPSShared::LessThan:
+ case AssemblerMIPSShared::Signed:
+ return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
+ case AssemblerMIPSShared::LessThanOrEqual:
+ return InstImm(op_blez, s, zero, BOffImm16(0));
+ default:
+ MOZ_CRASH("Condition not supported.");
+ }
+}
+
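+// For bc1t/bc1f the rt field encodes the FP condition: bit 0 selects
+// branch-on-true vs. branch-on-false, and the condition-code number sits
+// at FccShift, matching the rtField computation below.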
+InstImm
+AssemblerMIPSShared::getBranchCode(FloatTestKind testKind, FPConditionBit fcc)
+{
+ MOZ_ASSERT(!(fcc && FccMask));
+ uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;
+
+ return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
+}
+
+BufferOffset
+AssemblerMIPSShared::as_j(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_j, off).encode());
+ return bo;
+}
+BufferOffset
+AssemblerMIPSShared::as_jal(JOffImm26 off)
+{
+ BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
+ return bo;
+}
+
+BufferOffset
+AssemblerMIPSShared::as_jr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
+ return bo;
+}
+BufferOffset
+AssemblerMIPSShared::as_jalr(Register rs)
+{
+ BufferOffset bo = writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
+ return bo;
+}
+
+
+// Arithmetic instructions
+BufferOffset
+AssemblerMIPSShared::as_addu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_addiu(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_daddu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_daddu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_daddiu(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_daddiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsubu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsubu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mult(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_multu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmult(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_dmult).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmultu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_dmultu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_div(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ddiv(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_ddiv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ddivu(Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, ff_ddivu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mul(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
+}
+
+// Shift instructions
+BufferOffset
+AssemblerMIPSShared::as_sll(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsll(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsll).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsll32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsll32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sllv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsllv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsllv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srl(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrl(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrl32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srlv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrlv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sra(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsra(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsra).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsra32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsra32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_srav(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dsrav(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrav).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_rotr(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotr(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(sa < 32);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotr32(Register rd, Register rt, uint16_t sa)
+{
+ MOZ_ASSERT(31 < sa && sa < 64);
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_rotrv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_drotrv(Register rd, Register rt, Register rs)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode());
+}
+
+// Load and store instructions
+BufferOffset
+AssemblerMIPSShared::as_lb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lbu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lhu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwu(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_lwr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ld, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ldl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ldl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ldr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_ldr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sb(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sh(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sw(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_swl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_swr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sd, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sdl(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sdl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sdr(Register rd, Register rs, int16_t off)
+{
+ return writeInst(InstImm(op_sdr, rs, rd, Imm16(off)).encode());
+}
+
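+// The gs-prefixed loads/stores below are Loongson extensions (indexed and
+// quad-word accesses); they should only be emitted when isLoongson()
+// reports Loongson support.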
+BufferOffset
+AssemblerMIPSShared::as_gslbx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssbx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslhx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsshx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslwx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsswx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdx(Register rd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslq(Register rh, Register rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssq(Register rh, Register rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+// Move from HI/LO register.
+BufferOffset
+AssemblerMIPSShared::as_mfhi(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mflo(Register rd)
+{
+ return writeInst(InstReg(op_special, rd, ff_mflo).encode());
+}
+
+// Set on less than.
+BufferOffset
+AssemblerMIPSShared::as_slt(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sltu(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_slti(Register rd, Register rs, int32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sltiu(Register rd, Register rs, uint32_t j)
+{
+ MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
+ return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
+}
+
+// Conditional move.
+BufferOffset
+AssemblerMIPSShared::as_movz(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movn(Register rd, Register rs, Register rt)
+{
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movt(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 1);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movf(Register rd, Register rs, uint16_t cc)
+{
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 0);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+// Bit twiddling.
+BufferOffset
+AssemblerMIPSShared::as_clz(Register rd, Register rs)
+{
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_clz).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dclz(Register rd, Register rs)
+{
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_dclz).encode());
+}
+
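+// For ins/ext-style instructions the architecture encodes the bit field as
+// its msb in the rd slot and its lsb in the sa slot, which is why the
+// encoders below pass Register::FromCode(pos + size - 1) (or size - 1)
+// as rd.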
+BufferOffset
+AssemblerMIPSShared::as_ins(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dins(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size >= 2 && size <= 64 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size >= 1 && size <= 32 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ext(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
+}
+
+// Sign extend
+BufferOffset
+AssemblerMIPSShared::as_seb(Register rd, Register rt)
+{
+ return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_seh(Register rd, Register rt)
+{
+ return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dext(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 && pos + size <= 63);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos < 32 && size > 32 && size <= 64 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1 - 32);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size)
+{
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size != 0 && size <= 32 && pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode());
+}
+
+// FP instructions
+BufferOffset
+AssemblerMIPSShared::as_ld(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_ldc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sd(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_sdc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ls(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_lwc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ss(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ return writeInst(InstImm(op_swc1, base, fd, Imm16(off)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssl(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssr(FloatRegister fd, Register base, int32_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslsx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsssx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gsldx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssdx(FloatRegister fd, Register rs, Register ri, int16_t off)
+{
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gslq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_gssq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off)
+{
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ctc1(Register rt, FPControl fc)
+{
+ return writeInst(InstReg(op_cop1, rs_ctc1, rt, FloatRegister(fc)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cfc1(Register rt, FPControl fc)
+{
+ return writeInst(InstReg(op_cop1, rs_cfc1, rt, FloatRegister(fc)).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mtc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mfc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mthc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mthc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_mfhc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_mfhc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmtc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_dmtc1, rt, fs).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_dmfc1(Register rt, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_dmfc1, rt, fs).encode());
+}
+
+// FP convert instructions
+BufferOffset
+AssemblerMIPSShared::as_ceilws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_floorws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_roundws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncls(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ceilwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_floorwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_roundwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_truncld(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtds(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtdw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtsw(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtwd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cvtws(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+// FP arithmetic instructions
+BufferOffset
+AssemblerMIPSShared::as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_abss(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_absd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_negs(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_negd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft)
+{
+ return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sqrts(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_sqrtd(FloatRegister fd, FloatRegister fs)
+{
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+// FP compare instructions
+BufferOffset
+AssemblerMIPSShared::as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+}
+
+// FP conditional move.
+BufferOffset
+AssemblerMIPSShared::as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ Register rt = Register::FromCode(fcc << 2 | 1);
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movf_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs, FPConditionBit fcc)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ Register rt = Register::FromCode(fcc << 2 | 0);
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movf_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movz_fmt).encode());
+}
+
+BufferOffset
+AssemblerMIPSShared::as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt)
+{
+ RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+ return writeInst(InstReg(op_cop1, rs, rt, fs, fd, ff_movn_fmt).encode());
+}
+
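+// Unbound labels thread their uses through the code itself: the word after
+// each branch (its delay slot) temporarily holds the offset of the next
+// branch in the label's use chain, terminated by INVALID_OFFSET. bind()
+// walks that chain and patches every branch to target |dest|.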
+void
+AssemblerMIPSShared::bind(Label* label, BufferOffset boff)
+{
+ // If our caller didn't give us an explicit target to bind to,
+ // bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+ // A used label holds a link to a branch that uses it.
+ BufferOffset b(label);
+ do {
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom())
+ return;
+
+ Instruction* inst = editSrc(b);
+
+ // Second word holds a pointer to the next branch in label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void
+AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
+{
+ if (label->used()) {
+ int32_t next;
+
+ BufferOffset b(label);
+ do {
+ Instruction* inst = editSrc(b);
+
+ append(wasm::TrapSite(target, b.getOffset()));
+ next = inst[1].encode();
+ inst[1].makeNop();
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->reset();
+}
+
+void
+AssemblerMIPSShared::retarget(Label* label, Label* target)
+{
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction* inst = editSrc(labelBranchOffset);
+
+ // Second word holds a pointer to the next branch in chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction* inst = editSrc(labelBranchOffset);
+ int32_t prev = target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ DebugOnly<uint32_t> prev = target->use(label->offset());
+ MOZ_ASSERT((int32_t)prev == Label::INVALID_OFFSET);
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+void
+AssemblerMIPSShared::as_break(uint32_t code)
+{
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ writeInst(op_special | code << FunctionBits | ff_break);
+}
+
+void
+AssemblerMIPSShared::as_sync(uint32_t stype)
+{
+ MOZ_ASSERT(stype <= 31);
+ writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. It is
+// only meant to be used on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void
+AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
+{
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t*
+AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
+{
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr)
+ *count += sizeof(Instruction);
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+// Since there are no pools in the MIPS implementation, this should be simple.
+Instruction*
+Instruction::next()
+{
+ return this + 1;
+}
+
+InstImm AssemblerMIPSShared::invertBranch(InstImm branch, BOffImm16 skipOffset)
+{
+ uint32_t rt = 0;
+ Opcode op = (Opcode) (branch.extractOpcode() << OpcodeShift);
+    switch (op) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq);
+ return branch;
+ case op_bgtz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blez);
+ return branch;
+ case op_blez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgtz);
+ return branch;
+ case op_regimm:
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt == (rt_bltz >> RTShift)) {
+ branch.setRT(rt_bgez);
+ return branch;
+ }
+ if (rt == (rt_bgez >> RTShift)) {
+ branch.setRT(rt_bltz);
+ return branch;
+ }
+
+ MOZ_CRASH("Error creating long branch.");
+
+ case op_cop1:
+ MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+
+ branch.setBOffImm16(skipOffset);
+ rt = branch.extractRT();
+ if (rt & 0x1)
+ branch.setRT((RTField) ((rt & ~0x1) << RTShift));
+ else
+ branch.setRT((RTField) ((rt | 0x1) << RTShift));
+ return branch;
+ default:
+ MOZ_CRASH("Error creating long branch.");
+ }
+}
+
+void
+AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm*)inst_.raw();
+
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
+ // We converted beq to andi, so now we restore it.
+ inst->setOpcode(op_beq);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
+void
+AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_)
+{
+ InstImm * inst = (InstImm*)inst_.raw();
+
+    // toggledJump is always used for short jumps.
+ MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
+ // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
+ inst->setOpcode(op_andi);
+
+ AutoFlushICache::flush(uintptr_t(inst), 4);
+}
+
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
new file mode 100644
index 000000000..a619fa0e0
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -0,0 +1,1522 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Assembler_mips_shared_h
+#define jit_mips_shared_Assembler_mips_shared_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/IonCode.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/Architecture-mips-shared.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero = { Registers::zero };
+static constexpr Register at = { Registers::at };
+static constexpr Register v0 = { Registers::v0 };
+static constexpr Register v1 = { Registers::v1 };
+static constexpr Register a0 = { Registers::a0 };
+static constexpr Register a1 = { Registers::a1 };
+static constexpr Register a2 = { Registers::a2 };
+static constexpr Register a3 = { Registers::a3 };
+static constexpr Register a4 = { Registers::ta0 };
+static constexpr Register a5 = { Registers::ta1 };
+static constexpr Register a6 = { Registers::ta2 };
+static constexpr Register a7 = { Registers::ta3 };
+static constexpr Register t0 = { Registers::t0 };
+static constexpr Register t1 = { Registers::t1 };
+static constexpr Register t2 = { Registers::t2 };
+static constexpr Register t3 = { Registers::t3 };
+static constexpr Register t4 = { Registers::ta0 };
+static constexpr Register t5 = { Registers::ta1 };
+static constexpr Register t6 = { Registers::ta2 };
+static constexpr Register t7 = { Registers::ta3 };
+static constexpr Register s0 = { Registers::s0 };
+static constexpr Register s1 = { Registers::s1 };
+static constexpr Register s2 = { Registers::s2 };
+static constexpr Register s3 = { Registers::s3 };
+static constexpr Register s4 = { Registers::s4 };
+static constexpr Register s5 = { Registers::s5 };
+static constexpr Register s6 = { Registers::s6 };
+static constexpr Register s7 = { Registers::s7 };
+static constexpr Register t8 = { Registers::t8 };
+static constexpr Register t9 = { Registers::t9 };
+static constexpr Register k0 = { Registers::k0 };
+static constexpr Register k1 = { Registers::k1 };
+static constexpr Register gp = { Registers::gp };
+static constexpr Register sp = { Registers::sp };
+static constexpr Register fp = { Registers::fp };
+static constexpr Register ra = { Registers::ra };
+
+static constexpr Register ScratchRegister = at;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope
+{
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister)
+ { }
+};
+struct SecondScratchRegisterScope : public AutoRegisterScope
+{
+ explicit SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, SecondScratchReg)
+ { }
+};
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register ArgumentsRectifierReg = s3;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register GlobalReg = s6; // used by Odin
+static constexpr Register HeapReg = s7; // used by Odin
+
+static constexpr Register PreBarrierReg = a1;
+
+static constexpr Register InvalidReg = { Registers::invalid_reg };
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = InvalidReg;
+static constexpr Register ReturnReg = v0;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+// A bias applied to the GlobalReg to allow the use of instructions with small
+// negative immediate offsets, which doubles the range of global data that can
+// be accessed with a single instruction.
+static const int32_t WasmGlobalRegBias = 32768;
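+// For example: if GlobalReg holds globalData + WasmGlobalRegBias, a signed
+// 16-bit displacement of -32768 reaches the first byte of global data and
+// +32767 reaches byte 65535, so a single lw/sw covers 64KB of globals rather
+// than 32KB.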
+
+// Registers used in the GenerateFFIIonExit Enable Activation block.
+static constexpr Register WasmIonExitRegCallee = t0;
+static constexpr Register WasmIonExitRegE0 = a0;
+static constexpr Register WasmIonExitRegE1 = a1;
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static constexpr Register WasmIonExitRegD0 = a0;
+static constexpr Register WasmIonExitRegD1 = a1;
+static constexpr Register WasmIonExitRegD2 = t0;
+
+// Registers used in the RegExpMatcher instruction (do not use JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used in the RegExpTester instruction (do not use ReturnReg).
+static constexpr Register RegExpTesterRegExpReg = CallTempReg0;
+static constexpr Register RegExpTesterStringReg = CallTempReg1;
+static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
+
+static constexpr uint32_t CodeAlignment = 4;
+
+// This boolean indicates whether we support SIMD instructions flavoured for
+// this architecture or not. Rather than a method in the LIRGenerator, it is
+// here such that it is accessible from the entire codebase. Once full support
+// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
+static constexpr bool SupportsSimd = false;
+
+// MIPS instruction types
+//                +---------------------------------------------------------------+
+//                |    6      |    5    |    5    |    5    |    5    |    6      |
+//                +---------------------------------------------------------------+
+// Register type  |   Opcode  |    Rs   |    Rt   |    Rd   |    Sa   | Function  |
+//                +---------------------------------------------------------------+
+//                |    6      |    5    |    5    |               16              |
+//                +---------------------------------------------------------------+
+// Immediate type |   Opcode  |    Rs   |    Rt   |    2's complement constant    |
+//                +---------------------------------------------------------------+
+//                |    6      |                          26                       |
+//                +---------------------------------------------------------------+
+// Jump type      |   Opcode  |                     jump_target                   |
+//                +---------------------------------------------------------------+
+// bit 31                                                                     bit 0
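+//
+// For example (sketch), the InstReg/InstImm helpers below compose these fields
+// directly: a register-type instruction such as "addu rd, rs, rt" is encoded as
+//   op_special | RS(rs) | RT(rt) | RD(rd) | ff_addu
+// and an immediate-type load such as "lw rt, off(rs)" as
+//   op_lw | RS(rs) | RT(rt) | Imm16(off).encode()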
+
+// MIPS instruction encoding constants.
+static const uint32_t OpcodeShift = 26;
+static const uint32_t OpcodeBits = 6;
+static const uint32_t RSShift = 21;
+static const uint32_t RSBits = 5;
+static const uint32_t RTShift = 16;
+static const uint32_t RTBits = 5;
+static const uint32_t RDShift = 11;
+static const uint32_t RDBits = 5;
+static const uint32_t RZShift = 0;
+static const uint32_t RZBits = 5;
+static const uint32_t SAShift = 6;
+static const uint32_t SABits = 5;
+static const uint32_t FunctionShift = 0;
+static const uint32_t FunctionBits = 6;
+static const uint32_t Imm16Shift = 0;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t Imm28Shift = 0;
+static const uint32_t Imm28Bits = 28;
+static const uint32_t ImmFieldShift = 2;
+static const uint32_t FRBits = 5;
+static const uint32_t FRShift = 21;
+static const uint32_t FSShift = 11;
+static const uint32_t FSBits = 5;
+static const uint32_t FTShift = 16;
+static const uint32_t FTBits = 5;
+static const uint32_t FDShift = 6;
+static const uint32_t FDBits = 5;
+static const uint32_t FCccShift = 8;
+static const uint32_t FCccBits = 3;
+static const uint32_t FBccShift = 18;
+static const uint32_t FBccBits = 3;
+static const uint32_t FBtrueShift = 16;
+static const uint32_t FBtrueBits = 1;
+static const uint32_t FccMask = 0x7;
+static const uint32_t FccShift = 2;
+
+
+// MIPS instruction field bit masks.
+static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift;
+static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
+static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
+static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
+static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
+static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
+static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
+static const uint32_t RegMask = Registers::Total - 1;
+
+static const uint32_t BREAK_STACK_UNALIGNED = 1;
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RS(Register r);
+uint32_t RT(Register r);
+uint32_t RT(uint32_t regCode);
+uint32_t RT(FloatRegister r);
+uint32_t RD(Register r);
+uint32_t RD(FloatRegister r);
+uint32_t RD(uint32_t regCode);
+uint32_t RZ(Register r);
+uint32_t RZ(FloatRegister r);
+uint32_t SA(uint32_t value);
+uint32_t SA(FloatRegister r);
+
+Register toRS (Instruction& i);
+Register toRT (Instruction& i);
+Register toRD (Instruction& i);
+Register toR (Instruction& i);
+
+// MIPS enums for instruction fields
+enum Opcode {
+ op_special = 0 << OpcodeShift,
+ op_regimm = 1 << OpcodeShift,
+
+ op_j = 2 << OpcodeShift,
+ op_jal = 3 << OpcodeShift,
+ op_beq = 4 << OpcodeShift,
+ op_bne = 5 << OpcodeShift,
+ op_blez = 6 << OpcodeShift,
+ op_bgtz = 7 << OpcodeShift,
+
+ op_addi = 8 << OpcodeShift,
+ op_addiu = 9 << OpcodeShift,
+ op_slti = 10 << OpcodeShift,
+ op_sltiu = 11 << OpcodeShift,
+ op_andi = 12 << OpcodeShift,
+ op_ori = 13 << OpcodeShift,
+ op_xori = 14 << OpcodeShift,
+ op_lui = 15 << OpcodeShift,
+
+ op_cop1 = 17 << OpcodeShift,
+ op_cop1x = 19 << OpcodeShift,
+
+ op_beql = 20 << OpcodeShift,
+ op_bnel = 21 << OpcodeShift,
+ op_blezl = 22 << OpcodeShift,
+ op_bgtzl = 23 << OpcodeShift,
+
+ op_daddi = 24 << OpcodeShift,
+ op_daddiu = 25 << OpcodeShift,
+
+ op_ldl = 26 << OpcodeShift,
+ op_ldr = 27 << OpcodeShift,
+
+ op_special2 = 28 << OpcodeShift,
+ op_special3 = 31 << OpcodeShift,
+
+ op_lb = 32 << OpcodeShift,
+ op_lh = 33 << OpcodeShift,
+ op_lwl = 34 << OpcodeShift,
+ op_lw = 35 << OpcodeShift,
+ op_lbu = 36 << OpcodeShift,
+ op_lhu = 37 << OpcodeShift,
+ op_lwr = 38 << OpcodeShift,
+ op_lwu = 39 << OpcodeShift,
+ op_sb = 40 << OpcodeShift,
+ op_sh = 41 << OpcodeShift,
+ op_swl = 42 << OpcodeShift,
+ op_sw = 43 << OpcodeShift,
+ op_sdl = 44 << OpcodeShift,
+ op_sdr = 45 << OpcodeShift,
+ op_swr = 46 << OpcodeShift,
+
+ op_ll = 48 << OpcodeShift,
+ op_lwc1 = 49 << OpcodeShift,
+ op_lwc2 = 50 << OpcodeShift,
+ op_ldc1 = 53 << OpcodeShift,
+ op_ldc2 = 54 << OpcodeShift,
+ op_ld = 55 << OpcodeShift,
+
+ op_sc = 56 << OpcodeShift,
+ op_swc1 = 57 << OpcodeShift,
+ op_swc2 = 58 << OpcodeShift,
+ op_sdc1 = 61 << OpcodeShift,
+ op_sdc2 = 62 << OpcodeShift,
+ op_sd = 63 << OpcodeShift,
+};
+
+enum RSField {
+ rs_zero = 0 << RSShift,
+ // cop1 encoding of RS field.
+ rs_mfc1 = 0 << RSShift,
+ rs_one = 1 << RSShift,
+ rs_dmfc1 = 1 << RSShift,
+ rs_cfc1 = 2 << RSShift,
+ rs_mfhc1 = 3 << RSShift,
+ rs_mtc1 = 4 << RSShift,
+ rs_dmtc1 = 5 << RSShift,
+ rs_ctc1 = 6 << RSShift,
+ rs_mthc1 = 7 << RSShift,
+ rs_bc1 = 8 << RSShift,
+ rs_s = 16 << RSShift,
+ rs_d = 17 << RSShift,
+ rs_w = 20 << RSShift,
+ rs_l = 21 << RSShift,
+ rs_ps = 22 << RSShift
+};
+
+enum RTField {
+ rt_zero = 0 << RTShift,
+ // regimm encoding of RT field.
+ rt_bltz = 0 << RTShift,
+ rt_bgez = 1 << RTShift,
+ rt_bltzal = 16 << RTShift,
+ rt_bgezal = 17 << RTShift
+};
+
+enum FunctionField {
+ // special encoding of function field.
+ ff_sll = 0,
+ ff_movci = 1,
+ ff_srl = 2,
+ ff_sra = 3,
+ ff_sllv = 4,
+ ff_srlv = 6,
+ ff_srav = 7,
+
+ ff_jr = 8,
+ ff_jalr = 9,
+ ff_movz = 10,
+ ff_movn = 11,
+ ff_break = 13,
+ ff_sync = 15,
+
+ ff_mfhi = 16,
+ ff_mflo = 18,
+
+ ff_dsllv = 20,
+ ff_dsrlv = 22,
+ ff_dsrav = 23,
+
+ ff_mult = 24,
+ ff_multu = 25,
+ ff_div = 26,
+ ff_divu = 27,
+ ff_dmult = 28,
+ ff_dmultu = 29,
+ ff_ddiv = 30,
+ ff_ddivu = 31,
+
+ ff_add = 32,
+ ff_addu = 33,
+ ff_sub = 34,
+ ff_subu = 35,
+ ff_and = 36,
+ ff_or = 37,
+ ff_xor = 38,
+ ff_nor = 39,
+
+ ff_slt = 42,
+ ff_sltu = 43,
+ ff_dadd = 44,
+ ff_daddu = 45,
+ ff_dsub = 46,
+ ff_dsubu = 47,
+
+ ff_tge = 48,
+ ff_tgeu = 49,
+ ff_tlt = 50,
+ ff_tltu = 51,
+ ff_teq = 52,
+ ff_tne = 54,
+ ff_dsll = 56,
+ ff_dsrl = 58,
+ ff_dsra = 59,
+ ff_dsll32 = 60,
+ ff_dsrl32 = 62,
+ ff_dsra32 = 63,
+
+ // special2 encoding of function field.
+ ff_mul = 2,
+ ff_clz = 32,
+ ff_clo = 33,
+ ff_dclz = 36,
+
+ // special3 encoding of function field.
+ ff_ext = 0,
+ ff_dextm = 1,
+ ff_dextu = 2,
+ ff_dext = 3,
+ ff_ins = 4,
+ ff_dinsm = 5,
+ ff_dinsu = 6,
+ ff_dins = 7,
+ ff_bshfl = 32,
+
+ // cop1 encoding of function field.
+ ff_add_fmt = 0,
+ ff_sub_fmt = 1,
+ ff_mul_fmt = 2,
+ ff_div_fmt = 3,
+ ff_sqrt_fmt = 4,
+ ff_abs_fmt = 5,
+ ff_mov_fmt = 6,
+ ff_neg_fmt = 7,
+
+ ff_round_l_fmt = 8,
+ ff_trunc_l_fmt = 9,
+ ff_ceil_l_fmt = 10,
+ ff_floor_l_fmt = 11,
+
+ ff_round_w_fmt = 12,
+ ff_trunc_w_fmt = 13,
+ ff_ceil_w_fmt = 14,
+ ff_floor_w_fmt = 15,
+
+ ff_movf_fmt = 17,
+ ff_movz_fmt = 18,
+ ff_movn_fmt = 19,
+
+ ff_cvt_s_fmt = 32,
+ ff_cvt_d_fmt = 33,
+ ff_cvt_w_fmt = 36,
+ ff_cvt_l_fmt = 37,
+ ff_cvt_ps_s = 38,
+
+ ff_c_f_fmt = 48,
+ ff_c_un_fmt = 49,
+ ff_c_eq_fmt = 50,
+ ff_c_ueq_fmt = 51,
+ ff_c_olt_fmt = 52,
+ ff_c_ult_fmt = 53,
+ ff_c_ole_fmt = 54,
+ ff_c_ule_fmt = 55,
+
+ ff_madd_s = 32,
+ ff_madd_d = 33,
+
+ // Loongson encoding of function field.
+ ff_gsxbx = 0,
+ ff_gsxhx = 1,
+ ff_gsxwx = 2,
+ ff_gsxdx = 3,
+ ff_gsxwlc1 = 4,
+ ff_gsxwrc1 = 5,
+ ff_gsxdlc1 = 6,
+ ff_gsxdrc1 = 7,
+ ff_gsxwxc1 = 6,
+ ff_gsxdxc1 = 7,
+ ff_gsxq = 0x20,
+ ff_gsxqc1 = 0x8020,
+
+ ff_null = 0
+};
+
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
+class BOffImm16
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 18) >> 16) + 4;
+ }
+
+ explicit BOffImm16(int offset)
+ : data ((offset - 4) >> 2 & Imm16Mask)
+ {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < int(unsigned(INT16_MIN) << 2))
+ return false;
+ if ((offset - 4) > (INT16_MAX << 2))
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction* getDest(Instruction* src) const;
+
+ BOffImm16(InstImm inst);
+};
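+
+// Worked example for the BOffImm16 encoding above (illustration only): a
+// branch whose target lies 8 bytes after the branch instruction stores
+// (8 - 4) >> 2 == 1 in the immediate field, because the hardware offset is
+// taken relative to the delay slot; decode() reverses this and yields 8 again.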
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26
+{
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 8) >> 6) + 4;
+ }
+
+ explicit JOffImm26(int offset)
+ : data ((offset - 4) >> 2 & Imm26Mask)
+ {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset - 4) < -536870912)
+ return false;
+ if ((offset - 4) > 536870908)
+ return false;
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26()
+ : data(INVALID)
+ { }
+
+ bool isInvalid() {
+ return data == INVALID;
+ }
+ Instruction* getDest(Instruction* src);
+
+};
+
+class Imm16
+{
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm)
+ : value(imm)
+ { }
+ uint32_t encode() {
+ return value;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) {
+ return imm <= UINT16_MAX ;
+ }
+ static Imm16 Lower (Imm32 imm) {
+ return Imm16(imm.value & 0xffff);
+ }
+ static Imm16 Upper (Imm32 imm) {
+ return Imm16((imm.value >> 16) & 0xffff);
+ }
+};
+
+class Imm8
+{
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm)
+ : value(imm)
+ { }
+ uint32_t encode(uint32_t shift) {
+ return value << shift;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) {
+ return imm <= UINT8_MAX ;
+ }
+ static Imm8 Lower (Imm16 imm) {
+ return Imm8(imm.decodeSigned() & 0xff);
+ }
+ static Imm8 Upper (Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
+
+class GSImm13
+{
+ uint16_t value;
+
+ public:
+ GSImm13();
+ GSImm13(uint32_t imm)
+ : value(imm & ~0xf)
+ { }
+ uint32_t encode(uint32_t shift) {
+ return ((value >> 4) & 0x1f) << shift;
+ }
+ int32_t decodeSigned() {
+ return value;
+ }
+ uint32_t decodeUnsigned() {
+ return value;
+ }
+ static bool IsInRange(int32_t imm) {
+ return imm >= int32_t(uint32_t(-256) << 4) && imm <= (255 << 4);
+ }
+};
+
+class Operand
+{
+ public:
+ enum Tag {
+ REG,
+ FREG,
+ MEM
+ };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand (Register reg_)
+ : tag(REG), reg(reg_.code())
+ { }
+
+ Operand (FloatRegister freg)
+ : tag(FREG), reg(freg.code())
+ { }
+
+ Operand (Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value)
+ { }
+
+ Operand (Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off)
+ { }
+
+ Operand (const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset)
+ { }
+
+ Tag getTag() const {
+ return tag;
+ }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
+
+inline Imm32
+Imm64::firstHalf() const
+{
+ return low();
+}
+
+inline Imm32
+Imm64::secondHalf() const
+{
+ return hi();
+}
+
+void
+PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
+ ReprotectCode reprotect = DontReprotect);
+
+void
+PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target);
+
+typedef js::jit::AssemblerBuffer<1024, Instruction> MIPSBuffer;
+
+class MIPSBufferWithExecutableCopy : public MIPSBuffer
+{
+ public:
+ void executableCopy(uint8_t* buffer) {
+ if (this->oom())
+ return;
+
+ for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
+ memcpy(buffer, &cur->instructions, cur->length());
+ buffer += cur->length();
+ }
+ }
+
+ bool appendBuffer(const MIPSBufferWithExecutableCopy& other) {
+ if (this->oom())
+ return false;
+
+ for (Slice* cur = other.head; cur != nullptr; cur = cur->getNext()) {
+ this->putBytes(cur->length(), &cur->instructions);
+ if (this->oom())
+ return false;
+ }
+ return true;
+ }
+};
+
+class AssemblerMIPSShared : public AssemblerShared
+{
+ public:
+
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ LessThan,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPConditionBit {
+ FCC0 = 0,
+ FCC1,
+ FCC2,
+ FCC3,
+ FCC4,
+ FCC5,
+ FCC6,
+ FCC7
+ };
+
+ enum FPControl {
+ FIR = 0,
+ UFR,
+ UNFR = 4,
+ FCCR = 25,
+ FEXR,
+ FENR = 28,
+ FCSR = 31
+ };
+
+ enum FloatFormat {
+ SingleFloat,
+ DoubleFloat
+ };
+
+ enum JumpOrCall {
+ BranchIsJump,
+ BranchIsCall
+ };
+
+ enum FloatTestKind {
+ TestForTrue,
+ TestForFalse
+ };
+
+    // :( this should be protected, but since CodeGenerator
+    // wants to use it, it needs to go out here :(
+
+ BufferOffset nextOffset() {
+ return m_buffer.nextOffset();
+ }
+
+ protected:
+ Instruction * editSrc (BufferOffset bo) {
+ return m_buffer.getInst(bo);
+ }
+ public:
+ uint32_t actualIndex(uint32_t) const;
+ static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index);
+ protected:
+    // Structure for fixing up pc-relative loads/jumps when the machine code
+    // gets moved (executable copy, gc, etc.).
+ struct RelativePatch
+ {
+        // The offset within the code buffer where the value we want to
+        // fix up is loaded.
+ BufferOffset offset;
+ void* target;
+ Relocation::Kind kind;
+
+ RelativePatch(BufferOffset offset, void* target, Relocation::Kind kind)
+ : offset(offset),
+ target(target),
+ kind(kind)
+ { }
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+ js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+ CompactBufferWriter preBarriers_;
+
+ MIPSBufferWithExecutableCopy m_buffer;
+
+ public:
+ AssemblerMIPSShared()
+ : m_buffer(),
+ isFinished(false)
+ { }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+
+ void writeRelocation(BufferOffset src) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+
+    // As opposed to the x86/x64 version, the data relocation has to be executed
+    // before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value))
+ embedsNurseryPointers_ = true;
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+ void writePrebarrierOffset(CodeOffset label) {
+ preBarriers_.writeUnsigned(label.offset());
+ }
+
+ public:
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+ }
+
+ static const Register getStackPointer() {
+ return StackPointer;
+ }
+
+ protected:
+ bool isFinished;
+ public:
+ void finish();
+ bool asmMergeWith(const AssemblerMIPSShared& other);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+ void copyPreBarrierTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+ size_t preBarrierTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+    // Write a blob of binary into the instruction stream *OR*
+    // into a destination address. If dest is nullptr (the default), then the
+    // instruction gets written into the instruction stream. If dest is not null,
+    // it is interpreted as a pointer to the location to which we want the
+    // instruction to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
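+    // For example (sketch): writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode())
+    // appends an addu to the instruction stream, while
+    // WriteInstStatic(NopInst, dest) patches the word at *dest in place.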
+
+ public:
+ BufferOffset haltingAlign(int alignment);
+ BufferOffset nopAlign(int alignment);
+ BufferOffset as_nop();
+
+ // Branch and jump instructions
+ BufferOffset as_bal(BOffImm16 off);
+ BufferOffset as_b(BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall);
+ InstImm getBranchCode(Register s, Register t, Condition c);
+ InstImm getBranchCode(Register s, Condition c);
+ InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);
+
+ BufferOffset as_j(JOffImm26 off);
+ BufferOffset as_jal(JOffImm26 off);
+
+ BufferOffset as_jr(Register rs);
+ BufferOffset as_jalr(Register rs);
+
+ // Arithmetic instructions
+ BufferOffset as_addu(Register rd, Register rs, Register rt);
+ BufferOffset as_addiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_daddu(Register rd, Register rs, Register rt);
+ BufferOffset as_daddiu(Register rd, Register rs, int32_t j);
+ BufferOffset as_subu(Register rd, Register rs, Register rt);
+ BufferOffset as_dsubu(Register rd, Register rs, Register rt);
+ BufferOffset as_mult(Register rs, Register rt);
+ BufferOffset as_multu(Register rs, Register rt);
+ BufferOffset as_dmult(Register rs, Register rt);
+ BufferOffset as_dmultu(Register rs, Register rt);
+ BufferOffset as_div(Register rs, Register rt);
+ BufferOffset as_divu(Register rs, Register rt);
+ BufferOffset as_mul(Register rd, Register rs, Register rt);
+ BufferOffset as_ddiv(Register rs, Register rt);
+ BufferOffset as_ddivu(Register rs, Register rt);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rs, Register rt);
+ BufferOffset as_or(Register rd, Register rs, Register rt);
+ BufferOffset as_xor(Register rd, Register rs, Register rt);
+ BufferOffset as_nor(Register rd, Register rs, Register rt);
+
+ BufferOffset as_andi(Register rd, Register rs, int32_t j);
+ BufferOffset as_ori(Register rd, Register rs, int32_t j);
+ BufferOffset as_xori(Register rd, Register rs, int32_t j);
+ BufferOffset as_lui(Register rd, int32_t j);
+
+ // Shift instructions
+ // as_sll(zero, zero, x) instructions are reserved as nop
+ BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsll32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_sllv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsllv(Register rd, Register rt, Register rs);
+ BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsrl32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srlv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrlv(Register rd, Register rt, Register rs);
+ BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_dsra32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_srav(Register rd, Register rt, Register rs);
+ BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_rotrv(Register rd, Register rt, Register rs);
+ BufferOffset as_dsrav(Register rd, Register rt, Register rs);
+ BufferOffset as_drotr(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotr32(Register rd, Register rt, uint16_t sa);
+ BufferOffset as_drotrv(Register rd, Register rt, Register rs);
+
+ // Load and store instructions
+ BufferOffset as_lb(Register rd, Register rs, int16_t off);
+ BufferOffset as_lbu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lh(Register rd, Register rs, int16_t off);
+ BufferOffset as_lhu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lw(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwu(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwl(Register rd, Register rs, int16_t off);
+ BufferOffset as_lwr(Register rd, Register rs, int16_t off);
+ BufferOffset as_ll(Register rd, Register rs, int16_t off);
+ BufferOffset as_ld(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldl(Register rd, Register rs, int16_t off);
+ BufferOffset as_ldr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sb(Register rd, Register rs, int16_t off);
+ BufferOffset as_sh(Register rd, Register rs, int16_t off);
+ BufferOffset as_sw(Register rd, Register rs, int16_t off);
+ BufferOffset as_swl(Register rd, Register rs, int16_t off);
+ BufferOffset as_swr(Register rd, Register rs, int16_t off);
+ BufferOffset as_sc(Register rd, Register rs, int16_t off);
+ BufferOffset as_sd(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdl(Register rd, Register rs, int16_t off);
+ BufferOffset as_sdr(Register rd, Register rs, int16_t off);
+
+ // Loongson-specific load and store instructions
+ BufferOffset as_gslbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssbx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslhx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsshx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslwx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsswx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsldx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssdx(Register rd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslq(Register rh, Register rl, Register rs, int16_t off);
+ BufferOffset as_gssq(Register rh, Register rl, Register rs, int16_t off);
+
+ // Move from HI/LO register.
+ BufferOffset as_mfhi(Register rd);
+ BufferOffset as_mflo(Register rd);
+
+ // Set on less than.
+ BufferOffset as_slt(Register rd, Register rs, Register rt);
+ BufferOffset as_sltu(Register rd, Register rs, Register rt);
+ BufferOffset as_slti(Register rd, Register rs, int32_t j);
+ BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);
+
+ // Conditional move.
+ BufferOffset as_movz(Register rd, Register rs, Register rt);
+ BufferOffset as_movn(Register rd, Register rs, Register rt);
+ BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
+ BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ BufferOffset as_clz(Register rd, Register rs);
+ BufferOffset as_dclz(Register rd, Register rs);
+ BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
+ BufferOffset as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Sign extend
+ BufferOffset as_seb(Register rd, Register rt);
+ BufferOffset as_seh(Register rd, Register rt);
+
+ // FP instructions
+
+    // Use these two functions only when you are sure the address is aligned.
+    // Otherwise, use ma_ld and ma_sd.
+ BufferOffset as_ld(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_sd(FloatRegister fd, Register base, int32_t off);
+
+ BufferOffset as_ls(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_ss(FloatRegister fd, Register base, int32_t off);
+
+ // Loongson-specific FP load and store instructions
+ BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsldr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gssdr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssl(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gsssr(FloatRegister fd, Register base, int32_t off);
+ BufferOffset as_gslsx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsssx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gsldx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gssdx(FloatRegister fd, Register rs, Register ri, int16_t off);
+ BufferOffset as_gslq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off);
+ BufferOffset as_gssq(FloatRegister rh, FloatRegister rl, Register rs, int16_t off);
+
+ BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_movd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ctc1(Register rt, FPControl fc);
+ BufferOffset as_cfc1(Register rt, FPControl fc);
+
+ BufferOffset as_mtc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfc1(Register rt, FloatRegister fs);
+
+ BufferOffset as_mthc1(Register rt, FloatRegister fs);
+ BufferOffset as_mfhc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmtc1(Register rt, FloatRegister fs);
+ BufferOffset as_dmfc1(Register rt, FloatRegister fs);
+
+ public:
+ // FP convert instructions
+ BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncls(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_truncld(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);
+
+ // FP arithmetic instructions
+ BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+
+ BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
+
+ BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
+ BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
+ BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
+
+ // FP compare instructions
+ BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+ FPConditionBit fcc = FCC0);
+
+ // FP conditional move.
+ BufferOffset as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
+ FPConditionBit fcc = FCC0);
+ BufferOffset as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt);
+ BufferOffset as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs, Register rt);
+
+ // label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ void bindLater(Label* label, wasm::TrapDesc target);
+ virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ virtual void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) = 0;
+ void bind(CodeOffset* label) {
+ label->bind(currentOffset());
+ }
+ uint32_t currentOffset() {
+ return nextOffset().getOffset();
+ }
+ void retarget(Label* label, Label* target);
+
+ // See Bind
+ size_t labelToPatchOffset(CodeOffset label) { return label.offset(); }
+
+ void call(Label* label);
+ void call(void* target);
+
+ void as_break(uint32_t code);
+ void as_sync(uint32_t stype = 0);
+
+ public:
+ static bool SupportsFloatingPoint() {
+#if (defined(__mips_hard_float) && !defined(__mips_single_float)) || \
+ defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool SupportsUnalignedAccesses() {
+ return true;
+ }
+ static bool SupportsSimd() {
+ return js::jit::SupportsSimd;
+ }
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void addPendingJump(BufferOffset src, ImmPtr target, Relocation::Kind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == Relocation::JITCODE)
+ writeRelocation(src);
+ }
+
+ void addLongJump(BufferOffset src) {
+ enoughMemory_ &= longJumps_.append(src.getOffset());
+ }
+
+ public:
+ size_t numLongJumps() const {
+ return longJumps_.length();
+ }
+ uint32_t longJump(size_t i) {
+ return longJumps_[i];
+ }
+
+ void flushBuffer() {
+ }
+
+ void comment(const char* msg) {
+ // This is not implemented because setPrinter() is not implemented.
+ // TODO spew("; %s", msg);
+ }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static uint32_t AlignDoubleArg(uint32_t offset) {
+ return (offset + 1U) &~ 1U;
+ }
+
+ static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ bool bailed() {
+ return m_buffer.bail();
+ }
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess)
+ {
+ // Implement this if we implement a disassembler.
+ }
+}; // AssemblerMIPSShared
+
+// sll zero, zero, 0
+const uint32_t NopInst = 0x00000000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// MIPS instructions.
+class Instruction
+{
+ protected:
+ uint32_t data;
+
+ // Standard constructor
+ Instruction (uint32_t data_) : data(data_) { }
+
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+ public:
+ uint32_t encode() const {
+ return data;
+ }
+
+ void makeNop() {
+ data = NopInst;
+ }
+
+ void setData(uint32_t data) {
+ this->data = data;
+ }
+
+ const Instruction & operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+    // Extract one particular bit.
+ uint32_t extractBit(uint32_t bit) {
+ return (encode() >> bit) & 1;
+ }
+ // Extract a bit field out of the instruction
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+    // Since all MIPS instructions have an opcode, the opcode
+    // extractor resides in the base class.
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ // Return the fields at their original place in the instruction encoding.
+ Opcode OpcodeFieldRaw() const {
+ return static_cast<Opcode>(encode() & OpcodeMask);
+ }
+
+    // Get the next instruction in the instruction stream.
+    // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+    // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+    // an instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// make sure that it is the right size
+static_assert(sizeof(Instruction) == 4, "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction
+{
+ public:
+ InstNOP()
+ : Instruction(NopInst)
+ { }
+
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction
+{
+ public:
+ InstReg(Opcode op, Register rd, FunctionField ff)
+ : Instruction(op | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff)
+ { }
+ InstReg(Opcode op, Register rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, RTField rt, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, Register rs, uint32_t cc, Register rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, uint32_t code, FunctionField ff)
+ : Instruction(op | code | ff)
+ { }
+    // for floating point
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd)
+ : Instruction(op | rs | RT(rt) | RD(rd))
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister rd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, Register rt, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fs, FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff)
+ { }
+ InstReg(Opcode op, RSField rs, FloatRegister ft, FloatRegister fd, uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff)
+ { }
+
+ uint32_t extractRS () {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT () {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ uint32_t extractRD () {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA () {
+ return extractBitField(SAShift + SABits - 1, SAShift);
+ }
+ uint32_t extractFunctionField () {
+ return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction
+{
+ public:
+ void extractImm16(BOffImm16* dest);
+
+ InstImm(Opcode op, Register rs, Register rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, RTField rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | rt | off.encode())
+ { }
+ InstImm(Opcode op, RSField rs, uint32_t cc, BOffImm16 off)
+ : Instruction(op | rs | cc | off.encode())
+ { }
+ InstImm(Opcode op, Register rs, Register rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+ InstImm(uint32_t raw)
+ : Instruction(raw)
+ { }
+ // For floating-point loads and stores.
+ InstImm(Opcode op, Register rs, FloatRegister rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode())
+ { }
+
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ void setOpcode(Opcode op) {
+ data = (data & ~OpcodeMask) | op;
+ }
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ void setRT(RTField rt) {
+ data = (data & ~RTMask) | rt;
+ }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+ void setImm16(Imm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction
+{
+ public:
+ InstJump(Opcode op, JOffImm26 off)
+ : Instruction(op | off.encode())
+ { }
+
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+// Class for Loongson-specific instructions
+class InstGS : public Instruction
+{
+ public:
+ // For indexed loads and stores.
+ InstGS(Opcode op, Register rs, Register rt, Register rd, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff)
+ { }
+ InstGS(Opcode op, Register rs, FloatRegister rt, Register rd, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff)
+ { }
+ // For quad-word loads and stores.
+ InstGS(Opcode op, Register rs, Register rt, Register rz, GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff)
+ { }
+ InstGS(Opcode op, Register rs, FloatRegister rt, FloatRegister rz, GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff)
+ { }
+ InstGS(uint32_t raw)
+ : Instruction(raw)
+ { }
+ // For floating-point unaligned loads and stores.
+ InstGS(Opcode op, Register rs, FloatRegister rt, Imm8 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff)
+ { }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Assembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
new file mode 100644
index 000000000..31e221ab2
--- /dev/null
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -0,0 +1,241 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
+#define jit_mips_shared_AtomicOperations_mips_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#if defined(__clang__) || defined(__GNUC__)
+
+// The default implementation tactic for gcc/clang is to use the newer
+// __atomic intrinsics added for use in C++11 <atomic>. Where that
+// isn't available, we use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
+// compatible option for older compilers: enable this to use GCC's old
+// __sync functions instead of the newer __atomic functions. This
+// will be required for GCC 4.6.x and earlier, and probably for Clang
+// 3.1, should we need to use those versions.
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
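+// (For example, a build targeting such a compiler could enable this fallback
+// by uncommenting the define above or by passing
+// -DATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS to the compiler.)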
+
+inline bool
+js::jit::AtomicOperations::isLockfree8()
+{
+# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+# if _MIPS_SIM == _ABI64
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+# endif
+ return true;
+# else
+ return false;
+# endif
+}
+
+inline void
+js::jit::AtomicOperations::fenceSeqCst()
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+# else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSeqCst(T* addr)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+# else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+# endif
+ return v;
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+# else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+# else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ return oldval;
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+# else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+# else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+# else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+# else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
+{
+ static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+# else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+# endif
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
+{
+ return *addr; // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline void
+js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
+{
+ *addr = val; // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+inline void
+js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
+{
+ ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
+}
+
+template<typename T>
+inline T
+js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
+{
+ MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+# else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::acquire(void* addr)
+{
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
+ ;
+# else
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
+ zero = 0;
+ continue;
+ }
+# endif
+}
+
+template<size_t nbytes>
+inline void
+js::jit::RegionLock::release(void* addr)
+{
+ MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
+# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_sub_and_fetch(&spinlock, 1);
+# else
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+# endif
+}
+
+# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+#elif defined(ENABLE_SHARED_ARRAY_BUFFER)
+
+# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"
+
+#endif
+
+#endif // jit_mips_shared_AtomicOperations_mips_shared_h
diff --git a/js/src/jit/mips-shared/Bailouts-mips-shared.cpp b/js/src/jit/mips-shared/Bailouts-mips-shared.cpp
new file mode 100644
index 000000000..7d8c2a76d
--- /dev/null
+++ b/js/src/jit/mips-shared/Bailouts-mips-shared.cpp
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ InvalidationBailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ framePointer_ = (uint8_t*) bailout->fp();
+ topFrameSize_ = framePointer_ - bailout->sp();
+ topIonScript_ = bailout->ionScript();
+ attachOnJitActivation(activations);
+
+ uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
+ const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
+ snapshotOffset_ = osiIndex->snapshotOffset();
+}
diff --git a/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp
new file mode 100644
index 000000000..b8d8017a2
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/BaselineCompiler-mips-shared.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPSShared::BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCompilerShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h
new file mode 100644
index 000000000..43f32f997
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineCompiler-mips-shared.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_BaselineCompiler_mips_shared_h
+#define jit_mips_shared_BaselineCompiler_mips_shared_h
+
+#include "jit/shared/BaselineCompiler-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPSShared : public BaselineCompilerShared
+{
+ protected:
+ BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_BaselineCompiler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
new file mode 100644
index 000000000..dc4fcab1a
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+bool
+ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+ masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
new file mode 100644
index 000000000..f3c776f42
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -0,0 +1,2931 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jsscriptinlines.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+using JS::GenericNaN;
+using JS::ToInt32;
+
+// shared
+CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm)
+{
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LAllocation& a)
+{
+ if (a.isGeneralReg())
+ return Operand(a.toGeneralReg()->reg());
+ if (a.isFloatReg())
+ return Operand(a.toFloatReg()->reg());
+ return Operand(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LAllocation* a)
+{
+ return ToOperand(*a);
+}
+
+Operand
+CodeGeneratorMIPSShared::ToOperand(const LDefinition* def)
+{
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand
+CodeGeneratorMIPSShared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToOperand(input.value());
+}
+#else
+Register64
+CodeGeneratorMIPSShared::ToOperandOrRegister64(const LInt64Allocation input)
+{
+ return ToRegister64(input);
+}
+#endif
+
+void
+CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond)
+{
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ Label* label = mir->lir()->label();
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+
+ CodeOffsetJump backedge;
+ Label skip;
+ if (fmt == Assembler::DoubleFloat)
+ masm.ma_bc1d(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+ else
+ masm.ma_bc1s(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+
+ backedge = masm.backedgeJump(&rejoin);
+ masm.bind(&rejoin);
+ masm.bind(&skip);
+
+ if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+ MOZ_CRASH();
+ } else {
+ if (fmt == Assembler::DoubleFloat)
+ masm.branchDouble(cond, lhs, rhs, mir->lir()->label());
+ else
+ masm.branchFloat(cond, lhs, rhs, mir->lir()->label());
+ }
+}
+
+void
+OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen)
+{
+ codegen->visitOutOfLineBailout(this);
+}
+
+void
+CodeGeneratorMIPSShared::visitTestIAndBranch(LTestIAndBranch* test)
+{
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompare(LCompare* comp)
+{
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object) {
+ if (right->isGeneralReg())
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ else
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ return;
+ }
+#endif
+
+ if (right->isConstant())
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)), ToRegister(def));
+ else if (right->isGeneralReg())
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ else
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareAndBranch(LCompareAndBranch* comp)
+{
+ MCompare* mir = comp->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object) {
+ if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+ return;
+ }
+#endif
+
+ if (comp->right()->isConstant()) {
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.load32(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+}
+
+bool
+CodeGeneratorMIPSShared::generateOutOfLineCode()
+{
+ if (!CodeGeneratorShared::generateOutOfLineCode())
+ return false;
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+        // The frame size is stored in 'ra' and pushed by GenerateBailoutThunk.
+        // We have to use 'ra' because generateBailoutTable will implicitly do
+        // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ JitCode* handler = gen->jitRuntime()->getGenericBailoutHandler();
+
+ masm.branch(handler);
+ }
+
+ return !masm.oom();
+}
+
+void
+CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot)
+{
+ if (masm.bailed())
+ return;
+
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ // Though the assembler doesn't track all frame pushes, at least make sure
+ // the known value makes sense. We can't use bailout tables if the stack
+ // isn't properly aligned to the static frame size.
+ MOZ_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
+ frameClass_.frameSize() == masm.framePushed());
+
+ // We don't use table bailouts because retargeting is easier this way.
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+ addOutOfLineCode(ool, new(alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void
+CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot)
+{
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void
+CodeGeneratorMIPSShared::visitMinMaxD(LMinMaxD* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxDouble(second, first, true);
+ else
+ masm.minDouble(second, first, true);
+}
+
+void
+CodeGeneratorMIPSShared::visitMinMaxF(LMinMaxF* ins)
+{
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax())
+ masm.maxFloat32(second, first, true);
+ else
+ masm.minFloat32(second, first, true);
+}
+
+void
+CodeGeneratorMIPSShared::visitAbsD(LAbsD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.as_absd(input, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitAbsF(LAbsF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ MOZ_ASSERT(input == ToFloatRegister(ins->output()));
+ masm.as_abss(input, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitSqrtD(LSqrtD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.as_sqrtd(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitSqrtF(LSqrtF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ masm.as_sqrts(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitAddI(LAddI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant())
+ masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant())
+ masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+ else
+ masm.ma_addTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitAddI64(LAddI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorMIPSShared::visitSubI(LSubI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant())
+ masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant())
+ masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)), &overflow);
+ else
+ masm.ma_subTestOverflow(ToRegister(dest), ToRegister(lhs), ToRegister(rhs), &overflow);
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitSubI64(LSubI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void
+CodeGeneratorMIPSShared::visitMulI(LMulI* ins)
+{
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+ // Bailout on -0.0
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow())
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot());
+
+ masm.ma_negu(dest, src);
+ break;
+ case 0:
+ masm.move32(Imm32(0), dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.as_addu(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.ma_sll(dest, src, Imm32(shift));
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
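+                // Illustrative example, assuming constant = 10 = (1<<3)|(1<<1):
+                //   shift = 3, rest = 2, shift_rest = 1, so we emit
+                //   dest = src << 2;   // src * 4
+                //   dest += src;       // src * 5
+                //   dest <<= 1;        // src * 10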
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.ma_sll(dest, src, Imm32(shift - shift_rest));
+ masm.add32(src, dest);
+ if (shift_rest != 0)
+ masm.ma_sll(dest, dest, Imm32(shift_rest));
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ // dest = lhs * pow(2, shift)
+ masm.ma_sll(dest, src, Imm32(shift));
+ // At runtime, check (lhs == dest >> shift), if this does
+ // not hold, some bits were lost due to overflow, and the
+ // computation should be resumed as a double.
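+                    // Illustrative: with lhs = 0x40000000 and shift = 2 the
+                    // 32-bit result wraps to 0, so (dest >> shift) != lhs and
+                    // we bail out.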
+ masm.ma_sra(ScratchRegister, dest, Imm32(shift));
+ bailoutCmp32(Assembler::NotEqual, src, ScratchRegister, ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul_branch_overflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs), &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+            // The result is -0 if lhs or rhs is negative.
+            // In that case the result must be a double value, so bail out.
+ Register scratch = SecondScratchReg;
+ masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitMulI64(LMulI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+
+ MOZ_ASSERT(ToRegister64(lhs) == ToOutRegister64(lir));
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ default:
+ if (constant > 0) {
+ // Use shift if constant is power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitDivI(LDivI* ins)
+{
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, temp, trap(mir, wasm::Trap::IntegerOverflow), Assembler::Equal);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+ // Handle negative 0. (0/-Y)
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+    // Note: the above safety checks could not be verified, as Ion seems to be
+    // smarter and requires double arithmetic in such cases.
+
+    // All checks passed. Let's call div.
+ if (mir->canTruncateRemainder()) {
+ masm.as_div(lhs, rhs);
+ masm.as_mflo(dest);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitDivPowTwoI(LDivPowTwoI* ins)
+{
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+ // If the remainder is going to be != 0, bailout since this must
+ // be a double.
+ masm.ma_sll(tmp, lhs, Imm32(32 - shift));
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+            // The numerator is known to be non-negative, so it needs no
+            // adjusting. Do the shift.
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+ if (shift > 1) {
+ masm.ma_sra(tmp, lhs, Imm32(31));
+ masm.ma_srl(tmp, tmp, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ } else {
+ masm.ma_srl(tmp, lhs, Imm32(32 - shift));
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.ma_sra(dest, tmp, Imm32(shift));
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitModI(LModI* ins)
+{
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
+ masm.move32(lhs, callTemp);
+
+ // Prevent INT_MIN % -1;
+ // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+ // 0/X (with X < 0) is bad because both of these values *should* be
+ // doubles, and the result should be -0.0, which cannot be represented in
+ // integers. X/0 is bad because it will give garbage (or abort), when it
+ // should give either \infty, -\infty or NAN.
+
+ // Prevent 0 / X (with X < 0) and X / 0
+ // testing X / Y. Compare Y with 0.
+ // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
+ // If (Y < 0), then we compare X with 0, and bail if X == 0
+ // If (Y == 0), then we simply want to bail.
+ // if (Y > 0), we don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ masm.ma_b(rhs, rhs, trap(mir, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+
+ masm.as_div(lhs, rhs);
+ masm.as_mfhi(dest);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitModPowTwoI(LModPowTwoI* ins)
+{
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+ // Switch based on sign of the lhs.
+ // Positive numbers are just a bitmask
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_b(&done, ShortJump);
+ }
+
+ // Negative numbers need a negate, bitmask, negate
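+    // Illustrative example, assuming in = -7 and shift = 2:
+    //   negate -> 7, mask with 3 -> 3, negate -> -3, which matches -7 % 4.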
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitModMaskI(LModMaskI* ins)
+{
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitNotI(LBitNotI* ins)
+{
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.ma_not(ToRegister(dest), ToRegister(input));
+}
+
+void
+CodeGeneratorMIPSShared::visitBitOpI(LBitOpI* ins)
+{
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+    // All of these bitops should be either imm32's or integer registers.
+ switch (ins->bitop()) {
+ case JSOP_BITOR:
+ if (rhs->isConstant())
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ case JSOP_BITXOR:
+ if (rhs->isConstant())
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ case JSOP_BITAND:
+ if (rhs->isConstant())
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ else
+ masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitOpI64(LBitOpI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOP_BITOR:
+ if (IsConstant(rhs))
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITXOR:
+ if (IsConstant(rhs))
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_BITAND:
+ if (IsConstant(rhs))
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ else
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitShiftI(LShiftI* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.ma_sll(dest, lhs, Imm32(shift));
+ else
+ masm.move32(lhs, dest);
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.ma_sra(dest, lhs, Imm32(shift));
+ else
+ masm.move32(lhs, dest);
+ break;
+ case JSOP_URSH:
+ if (shift) {
+ masm.ma_srl(dest, lhs, Imm32(shift));
+ } else {
+ // x >>> 0 can overflow.
+ if (ins->mir()->toUrsh()->fallible())
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+ // The shift amounts should be AND'ed into the 0-31 range
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOP_LSH:
+ masm.ma_sll(dest, lhs, dest);
+ break;
+ case JSOP_RSH:
+ masm.ma_sra(dest, lhs, dest);
+ break;
+ case JSOP_URSH:
+ masm.ma_srl(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitShiftI64(LShiftI64* lir)
+{
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ if (shift)
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ if (shift)
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ if (shift)
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOP_LSH:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_RSH:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOP_URSH:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitRotateI64(LRotateI64* lir)
+{
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c)
+ return;
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ else
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ } else {
+ if (mir->isLeftRotate())
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ else
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitUrshD(LUrshD* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
+ } else {
+ masm.ma_srl(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
+
+void
+CodeGeneratorMIPSShared::visitClzI(LClzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_clz(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitCtzI(LCtzI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_ctz(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitPopcntI(LPopcntI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void
+CodeGeneratorMIPSShared::visitPopcntI64(LPopcntI64* ins)
+{
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.popcnt64(input, output, tmp);
+}
+
+void
+CodeGeneratorMIPSShared::visitPowHalfD(LPowHalfD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ Label done, skip;
+
+    // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
+ masm.ma_bc1d(input, ScratchDoubleReg, &skip, Assembler::DoubleNotEqualOrUnordered, ShortJump);
+ masm.as_negd(output, ScratchDoubleReg);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.as_addd(output, input, ScratchDoubleReg);
+ masm.as_sqrtd(output, output);
+
+ masm.bind(&done);
+}
+
+MoveOperand
+CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const
+{
+ if (a.isGeneralReg())
+ return MoveOperand(ToRegister(a));
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ int32_t offset = ToStackOffset(a);
+ MOZ_ASSERT((offset & 3) == 0);
+
+ return MoveOperand(StackPointer, offset);
+}
+
+void
+CodeGeneratorMIPSShared::visitMathD(LMathD* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.as_addd(output, src1, src2);
+ break;
+ case JSOP_SUB:
+ masm.as_subd(output, src1, src2);
+ break;
+ case JSOP_MUL:
+ masm.as_muld(output, src1, src2);
+ break;
+ case JSOP_DIV:
+ masm.as_divd(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitMathF(LMathF* math)
+{
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOP_ADD:
+ masm.as_adds(output, src1, src2);
+ break;
+ case JSOP_SUB:
+ masm.as_subs(output, src1, src2);
+ break;
+ case JSOP_MUL:
+ masm.as_muls(output, src1, src2);
+ break;
+ case JSOP_DIV:
+ masm.as_divs(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitFloor(LFloor* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label skipCheck, done;
+
+    // If NaN, 0 or -0, check for a bailout.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_floorwd(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitFloorF(LFloorF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label skipCheck, done;
+
+    // If NaN, 0 or -0, check for a bailout.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleLo(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_floorws(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitCeil(LCeil* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label performCeil, done;
+
+ // If x < -1 or x > 0 then perform ceil.
+ masm.loadConstantDouble(0, scratch);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
+ masm.loadConstantDouble(-1, scratch);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch, &performCeil);
+
+ // If high part is not zero, the input was not 0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&performCeil);
+ masm.as_ceilwd(scratch, input);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitCeilF(LCeilF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label performCeil, done;
+
+ // If x < -1 or x > 0 then perform ceil.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.branchFloat(Assembler::DoubleGreaterThan, input, scratch, &performCeil);
+ masm.loadConstantFloat32(-1.0f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch, &performCeil);
+
+ // If binary value is not zero, the input was not 0, so we bail.
+ masm.moveFromFloat32(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&performCeil);
+ masm.as_ceilws(scratch, input);
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitRound(LRound* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ FloatRegister scratch = ScratchDoubleReg;
+ Register output = ToRegister(lir->output());
+
+ Label bail, negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
+ masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
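+    // Illustrative note: adding a plain 0.5 would be wrong for inputs such as
+    // 0.49999999999999994, where input + 0.5 rounds up to exactly 1.0; using
+    // the biggest double below 0.5 keeps the sum below 1.0 so the floor is 0.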
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ masm.loadConstantDouble(0.0, scratch);
+ masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If NaN, 0 or -0, check for a bailout.
+ masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ masm.moveFromDoubleHi(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&end, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_addd(scratch, input, temp);
+ masm.as_floorwd(scratch, scratch);
+
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added; other negative inputs need the
+    // biggest double less than 0.5 added.
+ Label loadJoin;
+ masm.loadConstantDouble(-0.5, scratch);
+ masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantDouble(0.5, temp);
+ masm.bind(&loadJoin);
+
+ masm.addDouble(input, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ masm.as_floorwd(scratch, temp);
+ masm.moveFromDoubleLo(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorMIPSShared::visitRoundF(LRoundF* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister temp = ToFloatRegister(lir->temp());
+ FloatRegister scratch = ScratchFloat32Reg;
+ Register output = ToRegister(lir->output());
+
+ Label bail, negative, end, skipCheck;
+
+ // Load biggest number less than 0.5 in the temp register.
+ masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ masm.loadConstantFloat32(0.0f, scratch);
+ masm.ma_bc1s(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+    // If NaN, 0 or -0, check for a bailout.
+ masm.ma_bc1s(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If binary value is not zero, it is NaN or -0, so we bail.
+ masm.moveFromFloat32(input, SecondScratchReg);
+ bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot());
+
+ // Input was zero, so return zero.
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&end, ShortJump);
+
+ masm.bind(&skipCheck);
+ masm.as_adds(scratch, input, temp);
+ masm.as_floorws(scratch, scratch);
+
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot());
+
+ masm.jump(&end);
+
+ // Input is negative, but isn't -0.
+ masm.bind(&negative);
+
+    // Inputs in ]-0.5; 0] need 0.5 added; other negative inputs need the
+    // biggest number less than 0.5 added.
+ Label loadJoin;
+ masm.loadConstantFloat32(-0.5f, scratch);
+ masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &loadJoin);
+ masm.loadConstantFloat32(0.5f, temp);
+ masm.bind(&loadJoin);
+
+ masm.as_adds(temp, input, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
+ bailoutFrom(&bail, lir->snapshot());
+
+ // Truncate and round toward zero.
+ // This is off-by-one for everything but integer-valued inputs.
+ masm.as_floorws(scratch, temp);
+ masm.moveFromFloat32(scratch, output);
+
+ bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot());
+
+ masm.bind(&end);
+}
+
+void
+CodeGeneratorMIPSShared::visitTruncateDToInt32(LTruncateDToInt32* ins)
+{
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void
+CodeGeneratorMIPSShared::visitTruncateFToInt32(LTruncateFToInt32* ins)
+{
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
+{
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (mir->isUnsigned()) {
+ // When the input value is Infinity, NaN, or rounds to an integer outside the
+ // range [INT64_MIN; INT64_MAX + 1[, the Invalid Operation flag is set in the FCSR.
+ if (fromType == MIRType::Double)
+ masm.as_truncld(ScratchDoubleReg, input);
+ else if (fromType == MIRType::Float32)
+ masm.as_truncls(ScratchDoubleReg, input);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ // Check that the result is in the uint32_t range.
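+        // Illustrative note: the 64-bit truncation result must have a zero
+        // high word to fit in uint32_t; OR-ing that high word with the FCSR
+        // Invalid Operation cause bit lets a single non-zero test catch both
+        // out-of-range and NaN inputs.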
+ masm.moveFromDoubleHi(ScratchDoubleReg, output);
+ masm.as_cfc1(ScratchRegister, Assembler::FCSR);
+ masm.as_ext(ScratchRegister, ScratchRegister, 16, 1);
+ masm.ma_or(output, ScratchRegister);
+ masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.moveFromFloat32(ScratchDoubleReg, output);
+ return;
+ }
+
+ // When the input value is Infinity, NaN, or rounds to an integer outside the
+ // range [INT32_MIN; INT32_MAX + 1[, the Invalid Operation flag is set in the FCSR.
+ if (fromType == MIRType::Double)
+ masm.as_truncwd(ScratchFloat32Reg, input);
+ else if (fromType == MIRType::Float32)
+ masm.as_truncws(ScratchFloat32Reg, input);
+ else
+ MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
+
+ // Check that the result is in the int32_t range.
+ masm.as_cfc1(output, Assembler::FCSR);
+ masm.as_ext(output, output, 16, 1);
+ masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual);
+
+ masm.bind(ool->rejoin());
+ masm.moveFromFloat32(ScratchFloat32Reg, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool)
+{
+ FloatRegister input = ool->input();
+ MIRType fromType = ool->fromType();
+ MIRType toType = ool->toType();
+
+ // Eagerly take care of NaNs.
+ Label inputIsNaN;
+ if (fromType == MIRType::Double)
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else if (fromType == MIRType::Float32)
+ masm.branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ else
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+
+ Label fail;
+
+ // Handle special values (not needed for unsigned values).
+ if (!ool->isUnsigned()) {
+ if (toType == MIRType::Int32) {
+ // MWasmTruncateToInt32
+ if (fromType == MIRType::Double) {
+                // We've used truncwd; the only valid double values that can
+                // truncate to INT32_MIN are in ]INT32_MIN - 1; INT32_MIN].
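+                // Illustrative: -2147483648.5 truncates to INT32_MIN and is
+                // valid, while -2147483649.0 is genuinely out of range.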
+ masm.loadConstantDouble(double(INT32_MIN) - 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(INT32_MIN), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThan, input, ScratchDoubleReg, &fail);
+
+ masm.as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
+ masm.jump(ool->rejoin());
+ }
+ } else if (toType == MIRType::Int64) {
+ if (fromType == MIRType::Double) {
+ masm.loadConstantDouble(double(INT64_MIN), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThan, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(INT64_MAX) + 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input,
+ ScratchDoubleReg, &fail);
+ masm.jump(ool->rejoin());
+ }
+ }
+ } else {
+ if (toType == MIRType::Int64) {
+ if (fromType == MIRType::Double) {
+ masm.loadConstantDouble(double(-1), ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, ScratchDoubleReg, &fail);
+
+ masm.loadConstantDouble(double(UINT64_MAX) + 1.0, ScratchDoubleReg);
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, input,
+ ScratchDoubleReg, &fail);
+ masm.jump(ool->rejoin());
+ }
+ }
+ }
+
+ // Handle errors.
+ masm.bind(&fail);
+ masm.jump(trap(ool, wasm::Trap::IntegerOverflow));
+
+ masm.bind(&inputIsNaN);
+ masm.jump(trap(ool, wasm::Trap::InvalidConversionToInteger));
+}
+
+void
+CodeGeneratorMIPSShared::visitCopySignF(LCopySignF* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.moveFromFloat32(lhs, lhsi);
+ masm.moveFromFloat32(rhs, rhsi);
+
+ // Combine.
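+    // Illustrative note: 'ins' copies bits 0..30 of lhsi (exponent and
+    // mantissa) into rhsi, leaving rhsi's sign bit untouched, so the result
+    // takes its magnitude from lhs and its sign from rhs.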
+ masm.as_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToFloat32(rhsi, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitCopySignD(LCopySignD* ins)
+{
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.moveFromDoubleHi(lhs, lhsi);
+ masm.moveFromDoubleHi(rhs, rhsi);
+
+ // Combine.
+ masm.as_ins(rhsi, lhsi, 0, 31);
+
+ masm.moveToDoubleHi(rhsi, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitValue(LValue* value)
+{
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void
+CodeGeneratorMIPSShared::visitDouble(LDouble* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorMIPSShared::visitFloat32(LFloat32* ins)
+{
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->getFloat(), ToFloatRegister(out));
+}
+
+void
+CodeGeneratorMIPSShared::visitTestDAndBranch(LTestDAndBranch* test)
+{
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitTestFAndBranch(LTestFAndBranch* test)
+{
+ FloatRegister input = ToFloatRegister(test->input());
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ // If 0, or NaN, the result is false.
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareD(LCompareD* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareF(LCompareF* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareDAndBranch(LCompareDAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareFAndBranch(LCompareFAndBranch* comp)
+{
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitBitAndAndBranch(LBitAndAndBranch* lir)
+{
+ if (lir->right()->isConstant())
+ masm.ma_and(ScratchRegister, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+ else
+ masm.as_and(ScratchRegister, ToRegister(lir->left()), ToRegister(lir->right()));
+ emitBranch(ScratchRegister, ScratchRegister, Assembler::NonZero, lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir)
+{
+ masm.convertUInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir)
+{
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitNotI(LNotI* ins)
+{
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitNotD(LNotD* ins)
+{
+    // Since this operation is a logical NOT, we want to set the output bit if
+    // the double is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantDouble(0.0, ScratchDoubleReg);
+ masm.ma_cmp_set_double(dest, in, ScratchDoubleReg, Assembler::DoubleEqualOrUnordered);
+}
+
+void
+CodeGeneratorMIPSShared::visitNotF(LNotF* ins)
+{
+    // Since this operation is a logical NOT, we want to set the output bit if
+    // the float32 is falsy, which means 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+
+ masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ masm.ma_cmp_set_float32(dest, in, ScratchFloat32Reg, Assembler::DoubleEqualOrUnordered);
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardShape(LGuardShape* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ masm.loadPtr(Address(obj, ShapedObject::offsetOfShape()), tmp);
+ bailoutCmpPtr(Assembler::NotEqual, tmp, ImmGCPtr(guard->mir()->shape()),
+ guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardObjectGroup(LGuardObjectGroup* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+ MOZ_ASSERT(obj != tmp);
+
+ masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), tmp);
+ Assembler::Condition cond = guard->mir()->bailOnEquality()
+ ? Assembler::Equal
+ : Assembler::NotEqual;
+ bailoutCmpPtr(cond, tmp, ImmGCPtr(guard->mir()->group()), guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitGuardClass(LGuardClass* guard)
+{
+ Register obj = ToRegister(guard->input());
+ Register tmp = ToRegister(guard->tempInt());
+
+ masm.loadObjClass(obj, tmp);
+ bailoutCmpPtr(Assembler::NotEqual, tmp, ImmPtr(guard->mir()->getClass()),
+ guard->snapshot());
+}
+
+void
+CodeGeneratorMIPSShared::visitMemoryBarrier(LMemoryBarrier* ins)
+{
+ masm.memoryBarrier(ins->type());
+}
+
+void
+CodeGeneratorMIPSShared::generateInvalidateEpilogue()
+{
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+ // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize())
+ masm.nop();
+
+ masm.bind(&invalidate_);
+
+    // Push the return address of the point that we bailed out at onto the stack.
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+ JitCode* thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.branch(thunk);
+
+ // We should never reach this point in JIT code -- the invalidation thunk
+ // should pop the invalidated JS frame and return directly to its caller.
+ masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
+}
+
+void
+CodeGeneratorMIPSShared::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorMIPSShared::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmCall(LWasmCall* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmCallI64(LWasmCallI64* ins)
+{
+ emitWasmCallBase(ins);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset <= INT32_MAX);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Float64: isFloat = true; break;
+ case Scalar::Float32: isFloat = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
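+    // Wasm heap accesses are addressed relative to HeapReg.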
+ BaseIndex address(HeapReg, ptr, TimesOne);
+
+ if (mir->access().isUnaligned()) {
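+        // Unaligned accesses go through helper sequences that need an extra integer temp register.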
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
+ else
+ masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
+ } else {
+ masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+ return;
+ }
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.loadFloat32(address, ToFloatRegister(lir->output()));
+ else
+ masm.loadDouble(address, ToFloatRegister(lir->output()));
+ } else {
+ masm.ma_load(ToRegister(lir->output()), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmLoad(LWasmLoad* lir)
+{
+ emitWasmLoad(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir)
+{
+ emitWasmLoad(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPSShared::emitWasmStore(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset <= INT32_MAX);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ // Maybe add the offset.
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ bool isFloat = false;
+
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ case Scalar::Float64: isFloat = true; break;
+ case Scalar::Float32: isFloat = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ BaseIndex address(HeapReg, ptr, TimesOne);
+
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (isFloat) {
+ if (byteSize == 4)
+ masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
+ else
+ masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
+ } else {
+ masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+ return;
+ }
+
+ if (isFloat) {
+        if (byteSize == 4)
+            masm.storeFloat32(ToFloatRegister(lir->value()), address);
+        else
+            masm.storeDouble(ToFloatRegister(lir->value()), address);
+ } else {
+ masm.ma_store(ToRegister(lir->value()), address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStore(LWasmStore* lir)
+{
+ emitWasmStore(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmUnalignedStore(LWasmUnalignedStore* lir)
+{
+ emitWasmStore(lir);
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
+{
+ const MAsmJSLoadHeap* mir = ins->mir();
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* out = ins->output();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; size = 8; break;
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16: isSigned = true; size = 16; break;
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32: isSigned = true; size = 32; break;
+ case Scalar::Uint32: isSigned = false; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ if (size == 32) {
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ } else {
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ }
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
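+    // Load the bounds-check limit into ScratchRegister; the offset of this instruction is
+    // recorded below so the limit can be patched later.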
+ BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+ Label done, outOfRange;
+ masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
+ // Offset is ok, let's load value.
+ if (isFloat) {
+ if (size == 32)
+ masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ else
+ masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
+ } else {
+ masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&outOfRange);
+ // Offset is out of range. Load default values.
+ if (isFloat) {
+ if (size == 32)
+ masm.loadFloat32(Address(GlobalReg, wasm::NaN32GlobalDataOffset - WasmGlobalRegBias),
+ ToFloatRegister(out));
+ else
+ masm.loadDouble(Address(GlobalReg, wasm::NaN64GlobalDataOffset - WasmGlobalRegBias),
+ ToFloatRegister(out));
+ } else {
+ masm.move32(Imm32(0), ToRegister(out));
+ }
+ masm.bind(&done);
+
+ masm.append(wasm::BoundsCheck(bo.getOffset()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
+{
+ const MAsmJSStoreHeap* mir = ins->mir();
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+
+ bool isSigned;
+ int size;
+ bool isFloat = false;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; size = 8; break;
+ case Scalar::Uint8: isSigned = false; size = 8; break;
+ case Scalar::Int16: isSigned = true; size = 16; break;
+ case Scalar::Uint16: isSigned = false; size = 16; break;
+ case Scalar::Int32: isSigned = true; size = 32; break;
+ case Scalar::Uint32: isSigned = false; size = 32; break;
+ case Scalar::Float64: isFloat = true; size = 64; break;
+ case Scalar::Float32: isFloat = true; size = 32; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ if (ptr->isConstant()) {
+ MOZ_ASSERT(!mir->needsBoundsCheck());
+ int32_t ptrImm = ptr->toConstant()->toInt32();
+ MOZ_ASSERT(ptrImm >= 0);
+
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ Address addr(HeapReg, ptrImm);
+ if (size == 32)
+ masm.storeFloat32(freg, addr);
+ else
+ masm.storeDouble(freg, addr);
+ } else {
+ masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ Register ptrReg = ToRegister(ptr);
+ Address dstAddr(ptrReg, 0);
+
+ if (!mir->needsBoundsCheck()) {
+ if (isFloat) {
+ FloatRegister freg = ToFloatRegister(value);
+ BaseIndex bi(HeapReg, ptrReg, TimesOne);
+ if (size == 32)
+ masm.storeFloat32(freg, bi);
+ else
+ masm.storeDouble(freg, bi);
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
+
+ Label outOfRange;
+ masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
+
+ // Offset is ok, let's store value.
+ if (isFloat) {
+        if (size == 32)
+            masm.storeFloat32(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+        else
+            masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
+ } else {
+ masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+ static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
+ }
+
+ masm.bind(&outOfRange);
+ masm.append(wasm::BoundsCheck(bo.getOffset()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
+{
+ MAsmJSCompareExchangeHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ const LAllocation* ptr = ins->ptr();
+ Register ptrReg = ToRegister(ptr);
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+
+ masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, oldval, newval, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
+{
+ MAsmJSAtomicExchangeHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+
+ masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ srcAddr, value, InvalidReg, valueTemp,
+ offsetTemp, maskTemp, ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ Imm32(ToInt32(value)), srcAddr, flagTemp, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+ else
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ ToRegister(value), srcAddr, flagTemp, InvalidReg,
+ valueTemp, offsetTemp, maskTemp,
+ ToAnyRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins)
+{
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MAsmJSAtomicBinopHeap* mir = ins->mir();
+ Scalar::Type vt = mir->access().type();
+ Register ptrReg = ToRegister(ins->ptr());
+ Register flagTemp = ToRegister(ins->flagTemp());
+ Register valueTemp = ToRegister(ins->valueTemp());
+ Register offsetTemp = ToRegister(ins->offsetTemp());
+ Register maskTemp = ToRegister(ins->maskTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+
+ BaseIndex srcAddr(HeapReg, ptrReg, TimesOne);
+
+ if (value->isConstant())
+ atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr, flagTemp,
+ valueTemp, offsetTemp, maskTemp);
+ else
+ atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr, flagTemp,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStackArg(LWasmStackArg* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())), Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()), Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStackArgI64(LWasmStackArgI64* ins)
+{
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg()))
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ else
+ masm.store64(ToRegister64(ins->arg()), dst);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmSelect(LWasmSelect* ins)
+{
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out, "true expr input is reused for output");
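+        // movz overwrites the reused trueExpr value with falseExpr only when cond is zero.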
+ masm.as_movz(out, ToRegister(falseExpr), cond);
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out, "true expr input is reused for output");
+
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32)
+ masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr), cond);
+ else if (mirType == MIRType::Double)
+ masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr), cond);
+ else
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32)
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ else if (mirType == MIRType::Double)
+ masm.loadDouble(ToAddress(falseExpr), out);
+ else
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmReinterpret(LWasmReinterpret* lir)
+{
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ DebugOnly<MIRType> from = ins->input()->type();
+
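+    // mfc1/mtc1 move the raw 32-bit pattern between an FPU register and a GPR without conversion.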
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitUDivOrMod(LUDivOrMod* ins)
+{
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+                masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
+ } else {
+ // Infinity|0 == 0
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
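+    // divu leaves the quotient in LO and the remainder in HI; the remainder is read first.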
+ masm.as_divu(lhs, rhs);
+ masm.as_mfhi(output);
+
+    // If the remainder is > 0, bail out since this must be a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder())
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ // Get quotient
+ masm.as_mflo(output);
+ }
+
+ if (!ins->mir()->isTruncated())
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPSShared::visitEffectiveAddress(LEffectiveAddress* ins)
+{
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ if (mir->type() == MIRType::Int32)
+ masm.load32(Address(GlobalReg, addr), ToRegister(ins->output()));
+ else if (mir->type() == MIRType::Float32)
+ masm.loadFloat32(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+ else
+ masm.loadDouble(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
+}
+
+void
+CodeGeneratorMIPSShared::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+
+ MOZ_ASSERT(IsNumberType(mir->value()->type()));
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ if (mir->value()->type() == MIRType::Int32)
+ masm.store32(ToRegister(ins->value()), Address(GlobalReg, addr));
+ else if (mir->value()->type() == MIRType::Float32)
+ masm.storeFloat32(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+ else
+ masm.storeDouble(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
+}
+
+void
+CodeGeneratorMIPSShared::visitNegI(LNegI* ins)
+{
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.ma_negu(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitNegD(LNegD* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negd(output, input);
+}
+
+void
+CodeGeneratorMIPSShared::visitNegF(LNegF* ins)
+{
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_negs(output, input);
+}
+
+template<typename S, typename T>
+void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem, Register flagTemp,
+ Register outTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+ MOZ_ASSERT_IF(arrayType == Scalar::Uint32, outTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor8ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16SignExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor16ZeroExtend(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicFetchAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicFetchSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicFetchAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicFetchOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicFetchXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, outTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ masm.convertUInt32ToDouble(outTemp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp)
+{
+ MOZ_ASSERT(flagTemp != InvalidReg);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor8(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor16(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.atomicAdd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchSubOp:
+ masm.atomicSub32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchAndOp:
+ masm.atomicAnd32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchOrOp:
+ masm.atomicOr32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ case AtomicFetchXorOp:
+ masm.atomicXor32(value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array atomic operation");
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const Address& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Imm32& value, const BaseIndex& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const Address& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+template void
+CodeGeneratorMIPSShared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const Register& value, const BaseIndex& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+
+void
+CodeGeneratorMIPSShared::visitWasmAddOffset(LWasmAddOffset* lir)
+{
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
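+    // Add the constant offset and branch to the out-of-bounds trap if the addition carries.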
+ masm.ma_addTestCarry(out, base, Imm32(mir->offset()), trap(mir, wasm::Trap::OutOfBounds));
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op,
+ Scalar::Type arrayType, const LAllocation* value, const T& mem,
+ Register flagTemp, Register outTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, AnyRegister output)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->temp1());
+ Register outTemp = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+ const LAllocation* value = lir->value();
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, flagTemp, outTemp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorMIPSShared* cg, AtomicOp op, Scalar::Type arrayType,
+ const LAllocation* value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ if (value->isConstant())
+ cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ else
+ cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register flagTemp = ToRegister(lir->flagTemp());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+ const LAllocation* value = lir->value();
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address mem(elements, ToInt32(lir->index()) * width);
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
+
+void
+CodeGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir)
+{
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToRegister(lir->valueTemp());
+ Register offsetTemp = ToRegister(lir->offsetTemp());
+ Register maskTemp = ToRegister(lir->maskTemp());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ int width = Scalar::byteSize(arrayType);
+
+ if (lir->index()->isConstant()) {
+ Address dest(elements, ToInt32(lir->index()) * width);
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+ masm.atomicExchangeToTypedIntArray(arrayType, dest, value, temp,
+ valueTemp, offsetTemp, maskTemp, output);
+ }
+}
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
new file mode 100644
index 000000000..ff5cca196
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_CodeGenerator_mips_shared_h
+#define jit_mips_shared_CodeGenerator_mips_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+class CodeGeneratorMIPSShared : public CodeGeneratorShared
+{
+ friend class MoveResolverMIPS;
+
+ CodeGeneratorMIPSShared* thisFromCtor() {
+ return this;
+ }
+
+ protected:
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template<typename T>
+ void bailoutTest32(Assembler::Condition c, Register lhs, T rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTestPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(Assembler::Zero, reg, Imm32(0xFF), &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ protected:
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir, Assembler::Condition cond)
+ {
+ mir = skipTrivialBlocks(mir);
+
+ Label* label = mir->lir()->label();
+ if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
+ // Note: the backedge is initially a jump to the next instruction.
+ // It will be patched to the target block's label during link().
+ RepatchLabel rejoin;
+ CodeOffsetJump backedge;
+ Label skip;
+
+ masm.ma_b(lhs, rhs, &skip, Assembler::InvertCondition(cond), ShortJump);
+ backedge = masm.backedgeJump(&rejoin);
+ masm.bind(&rejoin);
+ masm.bind(&skip);
+
+ if (!patchableBackedges_.append(PatchableBackedgeInfo(backedge, label, oolEntry)))
+ MOZ_CRASH();
+ } else {
+ masm.ma_b(lhs, rhs, label, cond);
+ }
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir, Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse)
+ {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ public:
+ // Instruction visitors.
+ virtual void visitMinMaxD(LMinMaxD* ins);
+ virtual void visitMinMaxF(LMinMaxF* ins);
+ virtual void visitAbsD(LAbsD* ins);
+ virtual void visitAbsF(LAbsF* ins);
+ virtual void visitSqrtD(LSqrtD* ins);
+ virtual void visitSqrtF(LSqrtF* ins);
+ virtual void visitAddI(LAddI* ins);
+ virtual void visitAddI64(LAddI64* ins);
+ virtual void visitSubI(LSubI* ins);
+ virtual void visitSubI64(LSubI64* ins);
+ virtual void visitBitNotI(LBitNotI* ins);
+ virtual void visitBitOpI(LBitOpI* ins);
+ virtual void visitBitOpI64(LBitOpI64* ins);
+
+ virtual void visitMulI(LMulI* ins);
+ virtual void visitMulI64(LMulI64* ins);
+
+ virtual void visitDivI(LDivI* ins);
+ virtual void visitDivPowTwoI(LDivPowTwoI* ins);
+ virtual void visitModI(LModI* ins);
+ virtual void visitModPowTwoI(LModPowTwoI* ins);
+ virtual void visitModMaskI(LModMaskI* ins);
+ virtual void visitPowHalfD(LPowHalfD* ins);
+ virtual void visitShiftI(LShiftI* ins);
+ virtual void visitShiftI64(LShiftI64* ins);
+ virtual void visitRotateI64(LRotateI64* lir);
+ virtual void visitUrshD(LUrshD* ins);
+
+ virtual void visitClzI(LClzI* ins);
+ virtual void visitCtzI(LCtzI* ins);
+ virtual void visitPopcntI(LPopcntI* ins);
+ virtual void visitPopcntI64(LPopcntI64* lir);
+
+ virtual void visitTestIAndBranch(LTestIAndBranch* test);
+ virtual void visitCompare(LCompare* comp);
+ virtual void visitCompareAndBranch(LCompareAndBranch* comp);
+ virtual void visitTestDAndBranch(LTestDAndBranch* test);
+ virtual void visitTestFAndBranch(LTestFAndBranch* test);
+ virtual void visitCompareD(LCompareD* comp);
+ virtual void visitCompareF(LCompareF* comp);
+ virtual void visitCompareDAndBranch(LCompareDAndBranch* comp);
+ virtual void visitCompareFAndBranch(LCompareFAndBranch* comp);
+ virtual void visitBitAndAndBranch(LBitAndAndBranch* lir);
+ virtual void visitWasmUint32ToDouble(LWasmUint32ToDouble* lir);
+ virtual void visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir);
+ virtual void visitNotI(LNotI* ins);
+ virtual void visitNotD(LNotD* ins);
+ virtual void visitNotF(LNotF* ins);
+
+ virtual void visitMathD(LMathD* math);
+ virtual void visitMathF(LMathF* math);
+ virtual void visitFloor(LFloor* lir);
+ virtual void visitFloorF(LFloorF* lir);
+ virtual void visitCeil(LCeil* lir);
+ virtual void visitCeilF(LCeilF* lir);
+ virtual void visitRound(LRound* lir);
+ virtual void visitRoundF(LRoundF* lir);
+ virtual void visitTruncateDToInt32(LTruncateDToInt32* ins);
+ virtual void visitTruncateFToInt32(LTruncateFToInt32* ins);
+
+ void visitWasmTruncateToInt32(LWasmTruncateToInt32* lir);
+ void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
+ void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
+
+ // Out of line visitors.
+ virtual void visitOutOfLineBailout(OutOfLineBailout* ool) = 0;
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+ void visitCopySignD(LCopySignD* ins);
+ void visitCopySignF(LCopySignF* ins);
+
+ protected:
+ virtual ValueOperand ToOutValue(LInstruction* ins) = 0;
+
+ public:
+ CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ void visitValue(LValue* value);
+ void visitDouble(LDouble* ins);
+ void visitFloat32(LFloat32* ins);
+
+ void visitGuardShape(LGuardShape* guard);
+ void visitGuardObjectGroup(LGuardObjectGroup* guard);
+ void visitGuardClass(LGuardClass* guard);
+
+ void visitNegI(LNegI* lir);
+ void visitNegD(LNegD* lir);
+ void visitNegF(LNegF* lir);
+ void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
+ void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitWasmCall(LWasmCall* ins);
+ void visitWasmCallI64(LWasmCallI64* ins);
+ void visitWasmLoad(LWasmLoad* ins);
+ void visitWasmUnalignedLoad(LWasmUnalignedLoad* ins);
+ void visitWasmStore(LWasmStore* ins);
+ void visitWasmUnalignedStore(LWasmUnalignedStore* ins);
+ void visitWasmAddOffset(LWasmAddOffset* ins);
+ void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins);
+ void visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEffect* ins);
+
+ void visitWasmStackArg(LWasmStackArg* ins);
+ void visitWasmStackArgI64(LWasmStackArgI64* ins);
+ void visitWasmSelect(LWasmSelect* ins);
+ void visitWasmReinterpret(LWasmReinterpret* ins);
+
+ void visitMemoryBarrier(LMemoryBarrier* ins);
+ void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
+ void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
+ void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ protected:
+ void visitEffectiveAddress(LEffectiveAddress* ins);
+ void visitUDivOrMod(LUDivOrMod* ins);
+
+ public:
+ // Unimplemented SIMD instructions
+ void visitSimdSplatX4(LSimdSplatX4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimd128Int(LSimd128Int* ins) { MOZ_CRASH("NYI"); }
+ void visitSimd128Float(LSimd128Float* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdReinterpretCast(LSimdReinterpretCast* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementI(LSimdExtractElementI* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdExtractElementF(LSimdExtractElementF* ins) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleI(LSimdGeneralShuffleI* lir) { MOZ_CRASH("NYI"); }
+ void visitSimdGeneralShuffleF(LSimdGeneralShuffleF* lir) { MOZ_CRASH("NYI"); }
+};
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorMIPSShared>
+{
+ LSnapshot* snapshot_;
+ uint32_t frameSize_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+ : snapshot_(snapshot),
+ frameSize_(frameSize)
+ { }
+
+ void accept(CodeGeneratorMIPSShared* codegen);
+
+ LSnapshot* snapshot() const {
+ return snapshot_;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_CodeGenerator_mips_shared_h */
diff --git a/js/src/jit/mips-shared/LIR-mips-shared.h b/js/src/jit/mips-shared/LIR-mips-shared.h
new file mode 100644
index 000000000..466965e84
--- /dev/null
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -0,0 +1,408 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_LIR_mips_shared_h
+#define jit_mips_shared_LIR_mips_shared_h
+
+namespace js {
+namespace jit {
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input) {
+ setOperand(0, input);
+ }
+};
+
+
+class LDivI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() {
+ return getOperand(0);
+ }
+
+ int32_t shift() {
+ return shift_;
+ }
+
+ MDiv* mir() const {
+ return mir_->toDiv();
+ }
+};
+
+class LModI : public LBinaryMath<1>
+{
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() {
+ return getTemp(0);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+ int32_t shift()
+ {
+ return shift_;
+ }
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2>
+{
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0, const LDefinition& temp1,
+ int32_t shift)
+ : shift_(shift)
+ {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const {
+ return shift_;
+ }
+
+ MMod* mir() const {
+ return mir_->toMod();
+ }
+};
+
+// Takes a tableswitch with an integer operand to decide which case to branch to.
+class LTableSwitch : public LInstructionHelper<0, 1, 2>
+{
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ const LAllocation* index() {
+ return getOperand(0);
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() {
+ return getTemp(1);
+ }
+};
+
+// Takes a tableswitch with a boxed value to decide which case to branch to.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3>
+{
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy, const LDefinition& jumpTablePointer,
+ MTableSwitch* ins)
+ {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const {
+ return mir_->toTableSwitch();
+ }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+ const LDefinition* tempFloat() {
+ return getTemp(1);
+ }
+ const LDefinition* tempPointer() {
+ return getTemp(2);
+ }
+};
+
+class LGuardShape : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardShape);
+
+ LGuardShape(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardShape* mir() const {
+ return mir_->toGuardShape();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<0, 1, 1>
+{
+ public:
+ LIR_HEADER(GuardObjectGroup);
+
+ LGuardObjectGroup(const LAllocation& in, const LDefinition& temp) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const MGuardObjectGroup* mir() const {
+ return mir_->toGuardObjectGroup();
+ }
+ const LDefinition* tempInt() {
+ return getTemp(0);
+ }
+};
+
+class LMulI : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(MulI);
+
+ MMul* mir() {
+ return mir_->toMul();
+ }
+};
+
+class LUDivOrMod : public LBinaryMath<0>
+{
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod())
+ return mir_->toMod()->trapOnError();
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 0>
+{
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const {
+ return mir_->toInt64ToFloatingPoint();
+ }
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2>
+{
+ public:
+ typedef LWasmLoadBase<NumDefs, 2> Base;
+
+ explicit LWasmUnalignedLoadBase(const LAllocation& ptr, const LDefinition& valueHelper)
+ : Base(ptr)
+ {
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(0);
+ }
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1>
+{
+ public:
+ explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(ptr, valueHelper)
+ {}
+ LIR_HEADER(WasmUnalignedLoad);
+};
+
+class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase<INT64_PIECES>
+{
+ public:
+ explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& valueHelper)
+ : LWasmUnalignedLoadBase(ptr, valueHelper)
+ {}
+ LIR_HEADER(WasmUnalignedLoadI64);
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+template<size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2>
+{
+ public:
+ typedef LInstructionHelper<0, NumOps, 2> Base;
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+
+ LWasmUnalignedStoreBase(const LAllocation& ptr, const LDefinition& valueHelper)
+ {
+ Base::setOperand(0, ptr);
+ Base::setTemp(0, LDefinition::BogusTemp());
+ Base::setTemp(1, valueHelper);
+ }
+ MWasmStore* mir() const {
+ return Base::mir_->toWasmStore();
+ }
+ const LAllocation* ptr() {
+ return Base::getOperand(PtrIndex);
+ }
+ const LDefinition* ptrCopy() {
+ return Base::getTemp(0);
+ }
+};
+
+} // namespace details
+
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStore);
+ LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, valueHelper)
+ {
+ setOperand(1, value);
+ }
+ const LAllocation* value() {
+ return Base::getOperand(ValueIndex);
+ }
+};
+
+class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES>
+{
+ public:
+ LIR_HEADER(WasmUnalignedStoreI64);
+ LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& valueHelper)
+ : LWasmUnalignedStoreBase(ptr, valueHelper)
+ {
+ setInt64Operand(1, value);
+ }
+ const LInt64Allocation value() {
+ return getInt64Operand(ValueIndex);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_LIR_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
new file mode 100644
index 000000000..f328d16f7
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -0,0 +1,753 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
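+// MIPS has no x86-style restrictions on which registers can be used for
+// byte-sized loads and stores, so these helpers simply forward to the generic
+// register allocation functions.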
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir)
+{
+ return useRegister(mir);
+}
+
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir)
+{
+ return useRegisterAtStart(mir);
+}
+
+LAllocation
+LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(MDefinition* mir)
+{
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition
+LIRGeneratorMIPSShared::tempByteOpRegister()
+{
+ return temp();
+}
+
+// x = !y
+void
+LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input)
+{
+ ins->setOperand(0, useRegister(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+void
+LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void
+LIRGeneratorMIPSShared::lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void
+LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ bool needsTemp = false;
+
+#ifdef JS_CODEGEN_MIPS32
+ needsTemp = true;
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorMIPSShared::visitMulI64
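+        // For example, constant == 8 gives shift == 3 and (int64_t(1) << 3) == 8,
+        // so the multiply can be strength-reduced to shifts and no temp is needed.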
+ if (constant >= -1 && constant <= 2)
+ needsTemp = false;
+ if (int64_t(1) << shift == constant)
+ needsTemp = false;
+ }
+#endif
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ lhs != rhs ? useInt64OrConstant(rhs) : useInt64OrConstantAtStart(rhs));
+ if (needsTemp)
+ ins->setTemp(0, temp());
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template<size_t Temps>
+void
+LIRGeneratorMIPSShared::lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#if defined(JS_NUNBOX32)
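+    // On 32-bit targets a 64-bit rotate needs a scratch register so the code
+    // generator can shuffle the two 32-bit halves.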
+ if (mir->isRotate())
+ ins->setTemp(0, temp());
+#endif
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES, "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES, "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES+1, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input)
+{
+ ins->setOperand(0, useRegister(input));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template<size_t Temps>
+void
+LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(ins, mir, LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, 1>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+void
+LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+void
+LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs)
+{
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+void
+LIRGeneratorMIPSShared::lowerDivI(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+        // similar manner to positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
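+        // For example, x / 8 has shift == 3 and is lowered to LDivPowTwoI, which
+        // emits an arithmetic shift (with an adjustment so that negative
+        // numerators still round toward zero).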
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir = new(alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new(alloc()) LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, div);
+}
+
+void
+LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs)
+{
+ LMulI* lir = new(alloc()) LMulI;
+ if (mul->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void
+LIRGeneratorMIPSShared::lowerModI(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir = new(alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
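+            // rhs has the form 2^(shift+1) - 1 (e.g. 7 when shift == 2), so the
+            // modulus can be computed with a mask-and-correct sequence.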
+ LModMaskI* lir = new(alloc()) LModMaskI(useRegister(mod->lhs()),
+ temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL),
+ shift + 1);
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir = new(alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+ define(lir, mod);
+}
+
+void
+LIRGeneratorMIPSShared::visitPowHalf(MPowHalf* ins)
+{
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new(alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+LTableSwitch*
+LIRGeneratorMIPSShared::newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV*
+LIRGeneratorMIPSShared::newLTableSwitchV(MTableSwitch* tableswitch)
+{
+ return new(alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)),
+ temp(), tempDouble(), temp(), tableswitch);
+}
+
+void
+LIRGeneratorMIPSShared::visitGuardShape(MGuardShape* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardShape* guard = new(alloc()) LGuardShape(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorMIPSShared::visitGuardObjectGroup(MGuardObjectGroup* ins)
+{
+ MOZ_ASSERT(ins->object()->type() == MIRType::Object);
+
+ LDefinition tempObj = temp(LDefinition::OBJECT);
+ LGuardObjectGroup* guard = new(alloc()) LGuardObjectGroup(useRegister(ins->object()), tempObj);
+ assignSnapshot(guard, ins->bailoutKind());
+ add(guard, ins);
+ redefine(ins, ins->object());
+}
+
+void
+LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir)
+{
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new(alloc()) LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSNeg(MAsmJSNeg* ins)
+{
+ if (ins->type() == MIRType::Int32) {
+ define(new(alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new(alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new(alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->access().isUnaligned()) {
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmUnalignedLoad(ptr, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ define(lir, ins);
+ return;
+ }
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmLoadI64(ptr);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmLoad(ptr);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
+{
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* value = ins->value();
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ if (ins->access().isUnaligned()) {
+        if (value->type() == MIRType::Int64) {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmUnalignedStore(baseAlloc, valueAlloc, temp());
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+    if (value->type() == MIRType::Int64) {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmStoreI64(baseAlloc, valueAlloc);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new(alloc()) LWasmStore(baseAlloc, valueAlloc);
+ if (ins->access().offset())
+ lir->setTemp(0, tempCopy(base, 0));
+
+ add(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmSelect(MWasmSelect* ins)
+{
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new(alloc()) LWasmSelectI64(useInt64RegisterAtStart(ins->trueExpr()),
+ useInt64(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineInt64ReuseInput(lir, ins, LWasmSelectI64::TrueExprIndex);
+ return;
+ }
+
+ auto* lir = new(alloc()) LWasmSelect(useRegisterAtStart(ins->trueExpr()),
+ use(ins->falseExpr()),
+ useRegister(ins->condExpr()));
+
+ defineReuseInput(lir, ins, LWasmSelect::TrueExprIndex);
+}
+
+void
+LIRGeneratorMIPSShared::lowerUDiv(MDiv* div)
+{
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ define(lir, div);
+}
+
+void
+LIRGeneratorMIPSShared::lowerUMod(MMod* mod)
+{
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new(alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible())
+ assignSnapshot(lir, Bailout_DoubleOutput);
+
+ define(lir, mod);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new(alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins)
+{
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new(alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins)
+{
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+
+ // For MIPS it is best to keep the 'base' in a register if a bounds check
+ // is needed.
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ // A bounds check is only skipped for a positive index.
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else
+ baseAlloc = useRegisterAtStart(base);
+
+ define(new(alloc()) LAsmJSLoadHeap(baseAlloc), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
+{
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+ LAllocation baseAlloc;
+
+ if (base->isConstant() && !ins->needsBoundsCheck()) {
+ MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+ baseAlloc = LAllocation(base->toConstant());
+ } else
+ baseAlloc = useRegisterAtStart(base);
+
+ add(new(alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value())), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitSubstr(MSubstr* ins)
+{
+ LSubstr* lir = new (alloc()) LSubstr(useRegister(ins->string()),
+ useRegister(ins->begin()),
+ useRegister(ins->length()),
+ temp(),
+ temp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+LIRGeneratorMIPSShared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+ LDefinition uint32Temp = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ uint32Temp = temp();
+
+ LCompareExchangeTypedArrayElement* lir =
+ new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, uint32Temp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins)
+{
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+
+ // If the target is a floating register then we need a temp at the
+ // CodeGenerator level for creating the result.
+
+ const LAllocation value = useRegister(ins->value());
+ LDefinition uint32Temp = LDefinition::BogusTemp();
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ uint32Temp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new(alloc()) LAtomicExchangeTypedArrayElement(elements, index, value, uint32Temp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ LAsmJSCompareExchangeHeap* lir =
+ new(alloc()) LAsmJSCompareExchangeHeap(useRegister(base),
+ useRegister(ins->oldValue()),
+ useRegister(ins->newValue()),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
+{
+ MOZ_ASSERT(ins->base()->type() == MIRType::Int32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ // The output may not be used but will be clobbered regardless,
+ // so ignore the case where we're not using the value and just
+ // use the output register as a temp.
+
+ LAsmJSAtomicExchangeHeap* lir =
+ new(alloc()) LAsmJSAtomicExchangeHeap(base, value,
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
+{
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+ MOZ_ASSERT(ins->access().offset() == 0);
+
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (!ins->hasUses()) {
+ LAsmJSAtomicBinopHeapForEffect* lir =
+ new(alloc()) LAsmJSAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()),
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ LAsmJSAtomicBinopHeap* lir =
+ new(alloc()) LAsmJSAtomicBinopHeap(useRegister(base),
+ useRegister(ins->value()),
+ /* temp= */ LDefinition::BogusTemp(),
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins)
+{
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index = useRegisterOrConstant(ins->index());
+ const LAllocation value = useRegister(ins->value());
+
+ if (!ins->hasUses()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new(alloc()) LAtomicTypedArrayElementBinopForEffect(elements, index, value,
+ /* flagTemp= */ temp(),
+ /* valueTemp= */ temp(),
+ /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition flagTemp = temp();
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
+ outTemp = temp();
+
+ // On mips, map flagTemp to temp1 and outTemp to temp2, at least for now.
+
+ LAtomicTypedArrayElementBinop* lir =
+ new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, flagTemp, outTemp,
+ /* valueTemp= */ temp(), /* offsetTemp= */ temp(),
+ /* maskTemp= */ temp());
+ define(lir, ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void
+LIRGeneratorMIPSShared::visitCopySign(MCopySign* ins)
+{
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double)
+ lir = new(alloc()) LCopySignD();
+ else
+ lir = new(alloc()) LCopySignF();
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void
+LIRGeneratorMIPSShared::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins)
+{
+ defineInt64(new(alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
new file mode 100644
index 000000000..a92addfe3
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Lowering_mips_shared_h
+#define jit_mips_shared_Lowering_mips_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPSShared : public LIRGeneratorShared
+{
+ protected:
+ LIRGeneratorMIPSShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // x86 has constraints on what registers can be formatted for 1-byte
+ // stores and loads; on MIPS all registers are okay.
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ bool needTempForPostBarrier() { return false; }
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs);
+ void lowerUrshD(MUrsh* mir);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
+ template<size_t Temps>
+ void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template<size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+ void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
+ MDefinition* lhs, MDefinition* rhs)
+ {
+ return lowerForFPU(ins, mir, lhs, rhs);
+ }
+
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerUDiv(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void visitPowHalf(MPowHalf* ins);
+ void visitAsmJSNeg(MAsmJSNeg* ins);
+ void visitWasmLoad(MWasmLoad* ins);
+ void visitWasmStore(MWasmStore* ins);
+ void visitWasmSelect(MWasmSelect* ins);
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ public:
+ void lowerPhi(MPhi* phi);
+ void visitGuardShape(MGuardShape* ins);
+ void visitGuardObjectGroup(MGuardObjectGroup* ins);
+ void visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins);
+ void visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins);
+ void visitAsmJSLoadHeap(MAsmJSLoadHeap* ins);
+ void visitAsmJSStoreHeap(MAsmJSStoreHeap* ins);
+ void visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins);
+ void visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins);
+ void visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins);
+ void visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic* ins);
+ void visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins);
+ void visitAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins);
+ void visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop* ins);
+ void visitSubstr(MSubstr* ins);
+ void visitWasmTruncateToInt64(MWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins);
+ void visitCopySign(MCopySign* ins);
+ void visitExtendInt32ToInt64(MExtendInt32ToInt64* ins);
+
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Lowering_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
new file mode 100644
index 000000000..f2eb0c9b2
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -0,0 +1,1030 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_inl_h
+#define jit_mips_shared_MacroAssembler_mips_shared_inl_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest)
+{
+ moveFromFloat32(src, dest);
+}
+
+void
+MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest)
+{
+ moveToFloat32(src, dest);
+}
+
+void
+MacroAssembler::move8SignExtend(Register src, Register dest)
+{
+ as_seb(dest, src);
+}
+
+void
+MacroAssembler::move16SignExtend(Register src, Register dest)
+{
+ as_seh(dest, src);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::not32(Register reg)
+{
+ ma_not(reg, reg);
+}
+
+void
+MacroAssembler::and32(Register src, Register dest)
+{
+ as_and(dest, dest, src);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_and(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::and32(const Address& src, Register dest)
+{
+ load32(src, SecondScratchReg);
+ ma_and(dest, SecondScratchReg);
+}
+
+void
+MacroAssembler::or32(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_or(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::xor32(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xor32(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Arithmetic instructions
+
+void
+MacroAssembler::add32(Register src, Register dest)
+{
+ as_addu(dest, dest, src);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, Register dest)
+{
+ ma_addu(dest, dest, imm);
+}
+
+void
+MacroAssembler::add32(Imm32 imm, const Address& dest)
+{
+ load32(dest, SecondScratchReg);
+ ma_addu(SecondScratchReg, imm);
+ store32(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, const Address& dest)
+{
+ loadPtr(dest, ScratchRegister);
+ addPtr(imm, ScratchRegister);
+ storePtr(ScratchRegister, dest);
+}
+
+void
+MacroAssembler::addPtr(const Address& src, Register dest)
+{
+ loadPtr(src, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void
+MacroAssembler::addDouble(FloatRegister src, FloatRegister dest)
+{
+ as_addd(dest, dest, src);
+}
+
+void
+MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_adds(dest, dest, src);
+}
+
+void
+MacroAssembler::sub32(Register src, Register dest)
+{
+ as_subu(dest, dest, src);
+}
+
+void
+MacroAssembler::sub32(Imm32 imm, Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssembler::sub32(const Address& src, Register dest)
+{
+ load32(src, SecondScratchReg);
+ as_subu(dest, dest, SecondScratchReg);
+}
+
+void
+MacroAssembler::subPtr(Register src, const Address& dest)
+{
+ loadPtr(dest, SecondScratchReg);
+ subPtr(src, SecondScratchReg);
+ storePtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::subPtr(const Address& addr, Register dest)
+{
+ loadPtr(addr, SecondScratchReg);
+ subPtr(SecondScratchReg, dest);
+}
+
+void
+MacroAssembler::subDouble(FloatRegister src, FloatRegister dest)
+{
+ as_subd(dest, dest, src);
+}
+
+void
+MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_subs(dest, dest, src);
+}
+
+void
+MacroAssembler::mul32(Register rhs, Register srcDest)
+{
+ as_mul(srcDest, srcDest, rhs);
+}
+
+void
+MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_muls(dest, dest, src);
+}
+
+void
+MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
+{
+ as_muld(dest, dest, src);
+}
+
+void
+MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
+{
+ movePtr(imm, ScratchRegister);
+ loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
+ mulDouble(ScratchDoubleReg, dest);
+}
+
+void
+MacroAssembler::quotient32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ if (isUnsigned)
+ as_divu(srcDest, rhs);
+ else
+ as_div(srcDest, rhs);
+ as_mflo(srcDest);
+}
+
+void
+MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
+{
+ if (isUnsigned)
+ as_divu(srcDest, rhs);
+ else
+ as_div(srcDest, rhs);
+ as_mfhi(srcDest);
+}
+
+void
+MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_divs(dest, dest, src);
+}
+
+void
+MacroAssembler::divDouble(FloatRegister src, FloatRegister dest)
+{
+ as_divd(dest, dest, src);
+}
+
+void
+MacroAssembler::neg32(Register reg)
+{
+ ma_negu(reg, reg);
+}
+
+void
+MacroAssembler::negateDouble(FloatRegister reg)
+{
+ as_negd(reg, reg);
+}
+
+void
+MacroAssembler::negateFloat(FloatRegister reg)
+{
+ as_negs(reg, reg);
+}
+
+void
+MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_abss(dest, src);
+}
+
+void
+MacroAssembler::absDouble(FloatRegister src, FloatRegister dest)
+{
+ as_absd(dest, src);
+}
+
+void
+MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_sqrts(dest, src);
+}
+
+void
+MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest)
+{
+ as_sqrtd(dest, src);
+}
+
+void
+MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void
+MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void
+MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN)
+{
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshift32(Register src, Register dest)
+{
+ ma_sll(dest, dest, src);
+}
+
+void
+MacroAssembler::lshift32(Imm32 imm, Register dest)
+{
+ ma_sll(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift32(Register src, Register dest)
+{
+ ma_srl(dest, dest, src);
+}
+
+void
+MacroAssembler::rshift32(Imm32 imm, Register dest)
+{
+ ma_srl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Register src, Register dest)
+{
+ ma_sra(dest, dest, src);
+}
+
+void
+MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest)
+{
+ ma_sra(dest, dest, imm);
+}
+
+// ===============================================================
+// Rotation functions
+void
+MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_rol(dest, input, count);
+ else
+ ma_move(dest, input);
+}
+void
+MacroAssembler::rotateLeft(Register count, Register input, Register dest)
+{
+ ma_rol(dest, input, count);
+}
+void
+MacroAssembler::rotateRight(Imm32 count, Register input, Register dest)
+{
+ if (count.value)
+ ma_ror(dest, input, count);
+ else
+ ma_move(dest, input);
+}
+void
+MacroAssembler::rotateRight(Register count, Register input, Register dest)
+{
+ ma_ror(dest, input, count);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz32(Register src, Register dest, bool knownNotZero)
+{
+ as_clz(dest, src);
+}
+
+void
+MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
+{
+ ma_ctz(dest, src);
+}
+
+void
+MacroAssembler::popcnt32(Register input, Register output, Register tmp)
+{
+ // Equivalent to GCC output of mozilla::CountPopulation32()
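+    // Classic parallel bit count: sum adjacent bit pairs, then nibbles, then
+    // accumulate the per-byte counts and shift the total down from the top byte.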
+ ma_move(output, input);
+ ma_sra(tmp, input, Imm32(1));
+ ma_and(tmp, Imm32(0x55555555));
+ ma_subu(output, tmp);
+ ma_sra(tmp, output, Imm32(2));
+ ma_and(output, Imm32(0x33333333));
+ ma_and(tmp, Imm32(0x33333333));
+ ma_addu(output, tmp);
+ ma_srl(tmp, output, Imm32(4));
+ ma_addu(output, tmp);
+ ma_and(output, Imm32(0xF0F0F0F));
+ ma_sll(tmp, output, Imm32(8));
+ ma_addu(output, tmp);
+ ma_sll(tmp, output, Imm32(16));
+ ma_addu(output, tmp);
+ ma_sra(output, output, Imm32(24));
+}
+
+// ===============================================================
+// Branch functions
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Register rhs, L label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm, L label)
+{
+ ma_b(lhs, imm, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void
+MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr, Imm32 imm, Label* label)
+{
+ load32(addr, SecondScratchReg);
+ ma_b(SecondScratchReg, imm, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label)
+{
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs, L label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
+{
+ movePtr(rhs, ScratchRegister);
+ Label skipJump;
+ ma_b(lhs, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+}
+
+template <typename T>
+CodeOffsetJump
+MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ movePtr(rhs, ScratchRegister);
+ Label skipJump;
+ ma_b(SecondScratchReg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+ CodeOffsetJump off = jumpWithPatch(label);
+ bind(&skipJump);
+ return off;
+}
+
+void
+MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ ma_bc1s(lhs, rhs, label, cond);
+}
+
+void
+MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ Label test, success;
+ as_truncws(ScratchFloat32Reg, src);
+ as_mfc1(dest, ScratchFloat32Reg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertFloat32ToInt32(src, dest, fail);
+}
+
+void
+MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
+ Label* label)
+{
+ ma_bc1d(lhs, rhs, label, cond);
+}
+
+// Convert the floating point value to an integer; if it did not fit, it was
+// clamped to INT32_MIN/INT32_MAX, and we can test for that.
+// NOTE: if the value really was supposed to be INT32_MAX / INT32_MIN then it
+// will be wrong.
+void
+MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
+{
+ Label test, success;
+ as_truncwd(ScratchDoubleReg, src);
+ as_mfc1(dest, ScratchDoubleReg);
+
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+}
+
+void
+MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
+{
+ convertDoubleToInt32(src, dest, fail);
+}
+
+template <typename T, typename L>
+void
+MacroAssembler::branchAdd32(Condition cond, T src, Register dest, L overflow)
+{
+ switch (cond) {
+ case Overflow:
+ ma_addTestOverflow(dest, dest, src, overflow);
+ break;
+ case CarrySet:
+ ma_addTestCarry(dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void
+MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* overflow)
+{
+ switch (cond) {
+ case Overflow:
+ ma_subTestOverflow(dest, dest, src, overflow);
+ break;
+ case NonZero:
+ case Zero:
+ ma_subu(dest, src);
+ ma_b(dest, dest, overflow, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+void
+MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+template <class L>
+void
+MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
+{
+ load32(lhs, SecondScratchReg);
+ branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs, L label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ as_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+ }
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+ ma_and(ScratchRegister, lhs, rhs);
+ ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
+{
+ loadPtr(lhs, SecondScratchReg);
+ branchTestPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value, Label* label)
+{
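+    // A double is falsy when it is +/-0 or NaN, so branch-if-truthy uses the
+    // ordered NotEqual and branch-if-falsy uses EqualOrUnordered.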
+ ma_lid(ScratchDoubleReg, 0.0);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc1d(value, ScratchDoubleReg, label, cond);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label, actual);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestGCThing(Condition cond, const Address& address, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+void
+MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestMagic(cond, scratch2, label);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address, Label* label)
+{
+ SecondScratchRegisterScope scratch2(*this);
+ extractTag(address, scratch2);
+ branchTestMagic(cond, scratch2, label);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const Address& dest)
+{
+ MOZ_CRASH("NYI");
+}
+void
+MacroAssembler::storeFloat32x3(FloatRegister src, const BaseIndex& dest)
+{
+ MOZ_CRASH("NYI");
+}
+
+void
+MacroAssembler::memoryBarrier(MemoryBarrierBits barrier)
+{
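+    // The operands are MIPS SYNC 'stype' hints: 19 (0x13) orders loads, 4 orders
+    // stores, 16 (0x10) is a lighter full barrier, and a plain sync is the full
+    // synchronizing barrier.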
+ if (barrier == MembarLoadLoad)
+ as_sync(19);
+ else if (barrier == MembarStoreStore)
+ as_sync(4);
+ else if (barrier & MembarSynchronizing)
+ as_sync();
+ else if (barrier)
+ as_sync(16);
+}
+
+// ===============================================================
+// Clamping functions.
+
+void
+MacroAssembler::clampIntToUint8(Register reg)
+{
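+    // Branchless clamp to [0, 255] using conditional moves.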
+ // If reg is < 0, then we want to clamp to 0.
+ as_slti(ScratchRegister, reg, 0);
+ as_movn(reg, zero, ScratchRegister);
+
+ // If reg is >= 255, then we want to clamp to 255.
+ ma_li(SecondScratchReg, Imm32(255));
+ as_slti(ScratchRegister, reg, 255);
+ as_movz(reg, SecondScratchReg, ScratchRegister);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_inl_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
new file mode 100644
index 000000000..18997e542
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -0,0 +1,1728 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+#include "jit/MacroAssembler.h"
+
+using namespace js;
+using namespace jit;
+
+void
+MacroAssemblerMIPSShared::ma_move(Register rd, Register rs)
+{
+ as_or(rd, rs, zero);
+}
+
+void
+MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr)
+{
+ writeDataRelocation(ptr);
+ asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+void
+MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm)
+{
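+    // Pick the shortest materialization: a single addiu, ori or lui when the
+    // immediate allows it, otherwise the usual lui + ori pair.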
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(dest, zero, imm.value);
+ } else if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(dest, zero, Imm16::Lower(imm).encode());
+ } else if (Imm16::Lower(imm).encode() == 0) {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ } else {
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+ }
+}
+
+// Shifts
+void
+MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift)
+{
+ as_sll(rd, rt, shift.value % 32);
+}
+void
+MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift)
+{
+ as_srl(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift)
+{
+ as_sra(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift)
+{
+ as_rotr(rd, rt, 32 - (shift.value % 32));
+}
+
+void
+MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Register shift)
+{
+ as_sllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Register shift)
+{
+ as_srlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Register shift)
+{
+ as_srav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Register shift)
+{
+ as_rotrv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Register shift)
+{
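+    // Rotating left by n is the same as rotating right by 32 - n; rotrv only
+    // reads the low five bits of the count, so the negated count works.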
+ ma_negu(ScratchRegister, shift);
+ as_rotrv(rd, rt, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs)
+{
+ as_subu(rd, zero, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_not(Register rd, Register rs)
+{
+ as_nor(rd, rs, zero);
+}
+
+// And.
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Register rs)
+{
+ as_and(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm)
+{
+ ma_and(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_andi(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_and(rd, rs, ScratchRegister);
+ }
+}
+
+// Or.
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Register rs)
+{
+ as_or(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm)
+{
+ ma_or(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_ori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_or(rd, rs, ScratchRegister);
+ }
+}
+
+// Xor.
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs)
+{
+ as_xor(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm)
+{
+ ma_xor(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInUnsignedRange(imm.value)) {
+ as_xori(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_xor(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs)
+{
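+    // Isolate the lowest set bit (rs & -rs), count its leading zeros, and map
+    // that to the trailing-zero count (31 - clz). For example, rs == 8 gives
+    // clz == 28 and a result of 3; for rs == 0 the movn below is skipped and
+    // the result stays at clz(0) == 32.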
+ ma_negu(ScratchRegister, rs);
+ as_and(rd, ScratchRegister, rs);
+ as_clz(rd, rd);
+ ma_negu(SecondScratchReg, rd);
+ ma_addu(SecondScratchReg, Imm32(0x1f));
+ as_movn(rd, SecondScratchReg, ScratchRegister);
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_addiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_addu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs)
+{
+ as_addu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm)
+{
+ ma_addu(rd, rd, imm);
+}
+
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
+{
+ as_addu(rd, rs, rt);
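+    // An unsigned result that wrapped around is smaller than either operand;
+    // e.g. 0xffffffff + 1 == 0 and 0 < 0xffffffff, so sltu records the carry.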
+ as_sltu(SecondScratchReg, rd, rs);
+ ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+ Register rt, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Register rt,
+ wasm::TrapDesc overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_addTestCarry(rd, rs, ScratchRegister, overflow);
+}
+
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<Label*>(Register rd, Register rs,
+ Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPSShared::ma_addTestCarry<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
+ wasm::TrapDesc overflow);
+
+// Subtract.
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm)
+{
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_addiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_subu(rd, rs, ScratchRegister);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm)
+{
+ ma_subu(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs)
+{
+ as_subu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPSShared::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
+ } else {
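+        // INT32_MIN cannot be negated in 32 bits, so it cannot be folded into
+        // an add of -imm; fall back to the register form.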
+ ma_li(ScratchRegister, Imm32(imm.value));
+ asMasm().ma_subTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ as_mul(rd, rs, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ as_mult(rs, rt);
+ as_mflo(rd);
+ as_sra(ScratchRegister, rd, 31);
+ as_mfhi(SecondScratchReg);
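+    // The product fits in 32 bits exactly when HI equals the sign extension
+    // of LO (LO >> 31); otherwise branch to overflow.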
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ as_div(rs, rt);
+ as_mfhi(ScratchRegister);
+ ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+ as_mflo(rd);
+}
+
+void
+MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow)
+{
+ ma_li(ScratchRegister, imm);
+ ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+void
+MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero)
+{
+    // MATH:
+    // We wish to compute x % ((1 << y) - 1) for a known constant, y.
+    // First, let b = (1 << y) and C = (1 << y) - 1, then think of the 32-bit
+    // dividend as a number in base b, namely
+    //   c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+    // Now, since both addition and multiplication commute with modulus,
+    //   x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+    //   (c_0 % C) + (c_1 % C)*(b % C) + (c_2 % C)*(b^2 % C) ...
+    // and since b == C + 1, b % C == 1 and b^n % C == 1, so the whole thing
+    // simplifies to:
+    //   (c_0 + c_1 + c_2 ... + c_n) % C
+    // Each c_n can easily be computed by a shift/bitextract, and the modulus
+    // can be maintained by simply subtracting C whenever the number gets
+    // over C.
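+    // As a worked example (illustrative only): for y == 2, b == 4 and C == 3;
+    // x == 27 is 123 in base 4, and (1 + 2 + 3) % 3 == 0 == 27 % 3.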
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+    // hold holds -1 if the value was negative, 1 otherwise.
+    // remain holds the remaining bits that have not been processed.
+    // SecondScratchReg serves as a temporary location to store extracted bits
+    // into, as well as holding the trial subtraction as a temp value.
+    // dest is the accumulator (and holds the final result).
+
+    // Move the whole value into remain.
+ ma_move(remain, src);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ ma_negu(remain, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ // Extract the bottom bits into SecondScratchReg.
+ ma_and(SecondScratchReg, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_addu(dest, dest, SecondScratchReg);
+    // Do a trial subtraction.
+    ma_subu(SecondScratchReg, dest, Imm32(mask));
+    // If (sum - C) >= 0, store sum - C back into sum, thus performing a
+    // modulus.
+ ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+ ma_move(dest, SecondScratchReg);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srl(remain, remain, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+    // If the hold was negative, negate the result to be in line with
+    // what JS wants.
+    if (negZero != nullptr) {
+        // Jump out in case of negative zero: hold is -1 on this path, so a
+        // zero remainder means the true JS result is -0.
+        ma_b(dest, dest, negZero, Zero);
+ ma_negu(dest, dest);
+ } else {
+ ma_negu(dest, dest);
+ }
+
+ bind(&done);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && ZeroExtend != extension && Imm8::IsInSignedRange(src.offset)) {
+ Register index = src.index;
+
+ if (src.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(src.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != src.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, src.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, src.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, src.base, index, src.offset);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, src.base, index, src.offset);
+ break;
+ case SizeWord:
+ as_gslwx(dest, src.base, index, src.offset);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, src.base, index, src.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(src.offset).encode();
+ hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ as_lbu(dest, base, lowOffset);
+ if (extension != ZeroExtend)
+ as_lbu(temp, base, hiOffset);
+ else
+ as_lb(temp, base, hiOffset);
+ as_ins(dest, temp, 8, 24);
+ break;
+ case SizeWord:
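+        // lwl/lwr together assemble the unaligned word: lwl fills in the
+        // most-significant bytes from hiOffset and lwr the least-significant
+        // bytes from lowOffset (assuming a little-endian layout).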
+ as_lwl(dest, base, hiOffset);
+ as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+ if (extension != ZeroExtend)
+ as_dext(dest, dest, 0, 32);
+#endif
+ break;
+ case SizeDouble:
+ as_ldl(dest, base, hiOffset);
+ as_ldr(dest, base, lowOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register index = dest.index;
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ asMasm().computeScaledAddress(dest, SecondScratchReg);
+ asMasm().ma_store(data, Address(SecondScratchReg, dest.offset), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+ Register data = zero;
+ Register index = dest.index;
+
+ if (imm.value) {
+ MOZ_ASSERT(ScratchRegister != dest.base);
+ MOZ_ASSERT(ScratchRegister != dest.index);
+ data = ScratchRegister;
+ ma_li(data, imm);
+ }
+
+ if (dest.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != dest.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, dest.base, index, dest.offset);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, dest.base, index, dest.offset);
+ break;
+ case SizeWord:
+ as_gsswx(data, dest.base, index, dest.offset);
+ break;
+ case SizeDouble:
+ as_gssdx(data, dest.base, index, dest.offset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+    // Make sure that SecondScratchReg contains the absolute address so that
+    // the offset is 0.
+ asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+    // ScratchRegister is free now; use it for loading the imm value.
+ ma_li(ScratchRegister, imm);
+
+    // With offset=0, ScratchRegister will not be used in ma_store(),
+    // so we can use it as a parameter here.
+ asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size, extension);
+}
+
+void
+MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t lowOffset, hiOffset;
+ Register base;
+
+ asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+ base = SecondScratchReg;
+ lowOffset = Imm16(dest.offset).encode();
+ hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ base = ScratchRegister;
+ lowOffset = Imm16(0).encode();
+ hiOffset = Imm16(size / 8 - 1).encode();
+ }
+
+ switch (size) {
+ case SizeHalfWord:
+ as_sb(data, base, lowOffset);
+ as_ext(temp, data, 8, 8);
+ as_sb(temp, base, hiOffset);
+ break;
+ case SizeWord:
+ as_swl(data, base, hiOffset);
+ as_swr(data, base, lowOffset);
+ break;
+ case SizeDouble:
+ as_sdl(data, base, hiOffset);
+ as_sdr(data, base, lowOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Condition c, JumpKind jumpKind)
+{
+ switch (c) {
+    case Equal:
+ case NotEqual:
+ asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+ break;
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ if (c == Always || c == AboveOrEqual)
+ ma_b(label, jumpKind);
+ else if (c == Below)
+ ; // This condition is always false. No branch required.
+ else
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind)
+{
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPSShared::ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind)
+{
+ Label label;
+ ma_b(lhs, rhs, &label, c, jumpKind);
+ bindLater(&label, target);
+}
+
+template void MacroAssemblerMIPSShared::ma_b<Register>(Register lhs, Register rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+template void MacroAssemblerMIPSShared::ma_b<Imm32>(Register lhs, Imm32 rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+template void MacroAssemblerMIPSShared::ma_b<ImmTag>(Register lhs, ImmTag rhs,
+ wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind);
+
+void
+MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind)
+{
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::ma_b(wasm::TrapDesc target, JumpKind jumpKind)
+{
+ Label label;
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), &label, jumpKind);
+ bindLater(&label, target);
+}
+
+Assembler::Condition
+MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
+{
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(scratch, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(scratch, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(scratch, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(scratch, rhs, lhs);
+ return Equal;
+      case Equal:
+ case NotEqual:
+ case Zero:
+ case NonZero:
+ case Always:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("There is a better way to compare for equality.");
+ break;
+ case Overflow:
+ MOZ_CRASH("Overflow condition not supported for MIPS.");
+ break;
+ default:
+ MOZ_CRASH("Invalid condition for branch.");
+ }
+ return Always;
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Condition c)
+{
+ switch (c) {
+      case Equal:
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rs, rt);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rs, rt);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rt, rs);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rs, rt);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rt, rs);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rs, rt);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rs, rt);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rt, rs);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rs == rt);
+ // seq d,s,$zero =>
+ // xor d,s,$zero
+ // sltiu d,d,1
+ as_xor(rd, rs, zero);
+ as_sltiu(rd, rd, 1);
+ break;
+ case NonZero:
+ // sne d,s,$zero =>
+ // xor d,s,$zero
+ // sltu d,$zero,d
+ as_xor(rd, rs, zero);
+ as_sltu(rd, zero, rd);
+ break;
+ case Signed:
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition for ma_cmp_set.");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind* testKind,
+ FPConditionBit fcc)
+{
+ switch (c) {
+ case DoubleOrdered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleEqual:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqual:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThan:
+ as_colt(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_cole(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThan:
+ as_colt(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqual:
+ as_cole(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleUnordered:
+ as_cun(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleEqualOrUnordered:
+ as_cueq(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_ceq(fmt, lhs, rhs, fcc);
+ *testKind = TestForFalse;
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_cult(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_cule(fmt, rhs, lhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrUnordered:
+ as_cult(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_cule(fmt, lhs, rhs, fcc);
+ *testKind = TestForTrue;
+ break;
+ default:
+ MOZ_CRASH("Invalid DoubleCondition.");
+ }
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c)
+{
+ ma_li(dest, Imm32(0));
+ ma_li(ScratchRegister, Imm32(1));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+
+ if (moveCondition == TestForTrue)
+ as_movt(dest, ScratchRegister);
+ else
+ as_movf(dest, ScratchRegister);
+}
+
+void
+MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
+{
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value)
+{
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, wasm::RawF32 value)
+{
+ Imm32 imm(value.bits());
+
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_liNegZero(FloatRegister dest)
+{
+ moveToDoubleLo(zero, dest);
+ ma_li(ScratchRegister, Imm32(INT_MIN));
+ asMasm().moveToDoubleHi(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gssdx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_sd(ft, Address(SecondScratchReg, address.offset));
+}
+
+void
+MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address)
+{
+ if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
+ Register index = address.index;
+
+ if (address.scale != TimesOne) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ MOZ_ASSERT(SecondScratchReg != address.base);
+ index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+ asMasm().ma_dsll(index, address.index, Imm32(shift));
+#else
+ asMasm().ma_sll(index, address.index, Imm32(shift));
+#endif
+ }
+
+ as_gsssx(ft, address.base, index, address.offset);
+ return;
+ }
+
+ asMasm().computeScaledAddress(address, SecondScratchReg);
+ asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
+}
+
+void
+MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+ FloatTestKind testKind;
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
+ asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+void
+MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest, FloatRegister second,
+ bool handleNaN, bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax
+ ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ // Make sure we handle -0 and 0 right.
+ ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(DoubleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_addd(ScratchDoubleReg, first, second);
+ } else {
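+        // For min we want the sign a negated sum would give:
+        // -((-first) + (-second)) is -0 whenever either zero operand is -0.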
+ as_negd(ScratchDoubleReg, first);
+ as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
+ as_negd(ScratchDoubleReg, ScratchDoubleReg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(DoubleFloat, first, ScratchDoubleReg);
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest, FloatRegister second,
+ bool handleNaN, bool isMax)
+{
+ FloatRegister first = srcDest;
+
+ Assembler::DoubleCondition cond = isMax
+ ? Assembler::DoubleLessThanOrEqual
+ : Assembler::DoubleGreaterThanOrEqual;
+ Label nan, equal, done;
+ FloatTestKind moveCondition;
+
+ // First or second is NaN, result is NaN.
+ ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ // Make sure we handle -0 and 0 right.
+ ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
+ compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ as_movt(SingleFloat, first, second);
+ ma_b(&done, ShortJump);
+
+ // Check for zero.
+ bind(&equal);
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
+ Assembler::DoubleEqual, &moveCondition);
+
+ // So now both operands are either -0 or 0.
+ if (isMax) {
+ // -0 + -0 = -0 and -0 + 0 = 0.
+ as_adds(ScratchFloat32Reg, first, second);
+ } else {
+ as_negs(ScratchFloat32Reg, first);
+ as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
+ as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
+ }
+ MOZ_ASSERT(TestForTrue == moveCondition);
+ // First is 0 or -0, move max/min to it, else just return it.
+ as_movt(SingleFloat, first, ScratchFloat32Reg);
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSShared::ma_call(ImmPtr dest)
+{
+ asMasm().ma_liPatchable(CallReg, dest);
+ as_jalr(CallReg);
+ as_nop();
+}
+
+void
+MacroAssemblerMIPSShared::ma_jump(ImmPtr dest)
+{
+ asMasm().ma_liPatchable(ScratchRegister, dest);
+ as_jr(ScratchRegister);
+ as_nop();
+}
+
+MacroAssembler&
+MacroAssemblerMIPSShared::asMasm()
+{
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler&
+MacroAssemblerMIPSShared::asMasm() const
+{
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOpMIPSr2(int nbytes, AtomicOp op,
+ const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp)
+{
+ atomicFetchOpMIPSr2(nbytes, false, op, value, addr, flagTemp,
+ valueTemp, offsetTemp, maskTemp, InvalidReg);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Register& addr, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ Label again;
+
+ as_andi(offsetTemp, addr, 3);
+ asMasm().subPtr(offsetTemp, addr);
+ as_sll(offsetTemp, offsetTemp, 3);
+ ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ as_sllv(maskTemp, maskTemp, offsetTemp);
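+    // maskTemp now selects the nbytes-wide lane inside the aligned word; for
+    // example, a 1-byte access at byte offset 2 gives offsetTemp == 16 and
+    // maskTemp == 0x00ff0000 (assuming a little-endian layout).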
+
+ bind(&again);
+
+ as_sync(16);
+
+ as_ll(flagTemp, addr, 0);
+
+ as_sllv(valueTemp, value, offsetTemp);
+ if (output != InvalidReg) {
+ as_and(output, flagTemp, maskTemp);
+ as_srlv(output, output, offsetTemp);
+ if (signExtend) {
+ switch (nbytes) {
+ case 1:
+ as_seb(output, output);
+ break;
+ case 2:
+ as_seh(output, output);
+ break;
+ case 4:
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ }
+ }
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ as_addu(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchSubOp:
+ as_subu(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchAndOp:
+ as_and(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchOrOp:
+ as_or(valueTemp, flagTemp, valueTemp);
+ break;
+ case AtomicFetchXorOp:
+ as_xor(valueTemp, flagTemp, valueTemp);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+
+ as_and(valueTemp, valueTemp, maskTemp);
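+    // Clear the selected lane in flagTemp ((x | m) ^ m == x & ~m), then merge
+    // in the updated bytes before the store-conditional.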
+ as_or(flagTemp, flagTemp, maskTemp);
+ as_xor(flagTemp, flagTemp, maskTemp);
+ as_or(flagTemp, flagTemp, valueTemp);
+
+ as_sc(flagTemp, addr, 0);
+
+ ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
+
+ as_sync(0);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicEffectOp(int nbytes, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicEffectOpMIPSr2(nbytes, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ ma_li(SecondScratchReg, value);
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, SecondScratchReg, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ atomicFetchOpMIPSr2(nbytes, signExtend, op, value, ScratchRegister,
+ flagTemp, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr,
+ Register oldval, Register newval, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output)
+{
+ Label again, end;
+
+ as_andi(offsetTemp, addr, 3);
+ asMasm().subPtr(offsetTemp, addr);
+ as_sll(offsetTemp, offsetTemp, 3);
+ ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ as_sllv(maskTemp, maskTemp, offsetTemp);
+
+ bind(&again);
+
+ as_sync(16);
+
+ as_ll(flagTemp, addr, 0);
+
+ as_and(output, flagTemp, maskTemp);
+    // If oldval is a valid register, do a compareExchange.
+ if (InvalidReg != oldval) {
+ as_sllv(valueTemp, oldval, offsetTemp);
+ as_and(valueTemp, valueTemp, maskTemp);
+ ma_b(output, valueTemp, &end, NotEqual, ShortJump);
+ }
+
+ as_sllv(valueTemp, newval, offsetTemp);
+ as_and(valueTemp, valueTemp, maskTemp);
+ as_or(flagTemp, flagTemp, maskTemp);
+ as_xor(flagTemp, flagTemp, maskTemp);
+ as_or(flagTemp, flagTemp, valueTemp);
+
+ as_sc(flagTemp, addr, 0);
+
+ ma_b(flagTemp, flagTemp, &again, Zero, ShortJump);
+
+ as_sync(0);
+
+ bind(&end);
+
+ as_srlv(output, output, offsetTemp);
+ if (signExtend) {
+ switch (nbytes) {
+ case 1:
+ as_seb(output, output);
+ break;
+ case 2:
+ as_seh(output, output);
+ break;
+ case 4:
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+ }
+}
+
+void
+MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const Address& address,
+ Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::compareExchange(int nbytes, bool signExtend, const BaseIndex& address,
+ Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, oldval, newval, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const Address& address,
+ Register value, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void
+MacroAssemblerMIPSShared::atomicExchange(int nbytes, bool signExtend, const BaseIndex& address,
+ Register value, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output)
+{
+ asMasm().computeEffectiveAddress(address, ScratchRegister);
+ compareExchangeMIPSr2(nbytes, signExtend, ScratchRegister, InvalidReg, value, SecondScratchReg,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void
+MacroAssembler::flush()
+{
+}
+
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::Push(Register reg)
+{
+ ma_push(reg);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const Imm32 imm)
+{
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmWord imm)
+{
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(const ImmPtr imm)
+{
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssembler::Push(const ImmGCPtr ptr)
+{
+ ma_li(ScratchRegister, ptr);
+ ma_push(ScratchRegister);
+ adjustFrame(sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Push(FloatRegister f)
+{
+ ma_push(f);
+ adjustFrame(sizeof(double));
+}
+
+void
+MacroAssembler::Pop(Register reg)
+{
+ ma_pop(reg);
+ adjustFrame(-sizeof(intptr_t));
+}
+
+void
+MacroAssembler::Pop(FloatRegister f)
+{
+ ma_pop(f);
+ adjustFrame(-sizeof(double));
+}
+
+void
+MacroAssembler::Pop(const ValueOperand& val)
+{
+ popValue(val);
+ framePushed_ -= sizeof(Value);
+}
+
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset
+MacroAssembler::call(Register reg)
+{
+ as_jalr(reg);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::call(Label* label)
+{
+ ma_bal(label);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset
+MacroAssembler::callWithPatch()
+{
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ addPtr(Imm32(5 * sizeof(uint32_t)), ra);
+ // Allocate space which will be patched by patchCall().
+ writeInst(UINT32_MAX);
+ as_lw(ScratchRegister, ra, -(int32_t)(5 * sizeof(uint32_t)));
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffset(currentOffset());
+}
+
+void
+MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset)
+{
+ BufferOffset call(callerOffset - 7 * sizeof(uint32_t));
+
+ BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
+ if (!offset.isInvalid()) {
+ InstImm* bal = (InstImm*)editSrc(call);
+ bal->setBOffImm16(offset);
+ } else {
+ uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
+ *u32 = calleeOffset - callerOffset;
+ }
+}
+
+CodeOffset
+MacroAssembler::farJumpWithPatch()
+{
+ ma_move(SecondScratchReg, ra);
+ as_bal(BOffImm16(3 * sizeof(uint32_t)));
+ as_lw(ScratchRegister, ra, 0);
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ writeInst(UINT32_MAX);
+ addPtr(ra, ScratchRegister);
+ as_jr(ScratchRegister);
+ ma_move(ra, SecondScratchReg);
+ return farJump;
+}
+
+void
+MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
+
+void
+MacroAssembler::repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset)
+{
+ uint32_t* u32 = reinterpret_cast<uint32_t*>(code + farJumpOffset);
+ *u32 = targetOffset - farJumpOffset;
+}
+
+CodeOffset
+MacroAssembler::nopPatchableToNearJump()
+{
+ CodeOffset offset(currentOffset());
+ as_nop();
+ as_nop();
+ return offset;
+}
+
+void
+MacroAssembler::patchNopToNearJump(uint8_t* jump, uint8_t* target)
+{
+ new (jump) InstImm(op_beq, zero, zero, BOffImm16(target - jump));
+}
+
+void
+MacroAssembler::patchNearJumpToNop(uint8_t* jump)
+{
+ new (jump) InstNOP();
+}
+
+void
+MacroAssembler::call(wasm::SymbolicAddress target)
+{
+ movePtr(target, CallReg);
+ call(CallReg);
+}
+
+void
+MacroAssembler::call(ImmWord target)
+{
+ call(ImmPtr((void*)target.value));
+}
+
+void
+MacroAssembler::call(ImmPtr target)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, Relocation::HARDCODED);
+ ma_call(target);
+}
+
+void
+MacroAssembler::call(JitCode* c)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ callJitNoProfiler(ScratchRegister);
+}
+
+void
+MacroAssembler::pushReturnAddress()
+{
+ push(ra);
+}
+
+void
+MacroAssembler::popReturnAddress()
+{
+ pop(ra);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t
+MacroAssembler::pushFakeReturnAddress(Register scratch)
+{
+ CodeLabel cl;
+
+ ma_li(scratch, cl.patchAt());
+ Push(scratch);
+ bind(cl.target());
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+void
+MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != SecondScratchReg);
+
+ movePtr(ptr, SecondScratchReg);
+ orPtr(Imm32(gc::ChunkMask), SecondScratchReg);
+ branch32(cond, Address(SecondScratchReg, gc::ChunkLocationOffsetFromLastByte),
+ Imm32(int32_t(gc::ChunkLocation::Nursery)), label);
+}
+
+void
+MacroAssembler::comment(const char* msg)
+{
+ Assembler::comment(msg);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
new file mode 100644
index 000000000..c9bd4a4d9
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
+#define jit_mips_shared_MacroAssembler_mips_shared_h
+
+#if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+
+#include "jit/AtomicOp.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize
+{
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension
+{
+ ZeroExtend = 0,
+ SignExtend = 1
+};
+
+enum JumpKind
+{
+ LongJump = 0,
+ ShortJump = 1
+};
+
+enum DelaySlotFill
+{
+ DontFillDelaySlot = 0,
+ FillDelaySlot = 1
+};
+
+static Register CallReg = t9;
+
+class MacroAssemblerMIPSShared : public Assembler
+{
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c, FloatTestKind* testKind,
+ FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_move(Register rd, Register rs);
+
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+
+ // Shift operations
+ void ma_sll(Register rd, Register rt, Imm32 shift);
+ void ma_srl(Register rd, Register rt, Imm32 shift);
+ void ma_sra(Register rd, Register rt, Imm32 shift);
+ void ma_ror(Register rd, Register rt, Imm32 shift);
+ void ma_rol(Register rd, Register rt, Imm32 shift);
+
+ void ma_sll(Register rd, Register rt, Register shift);
+ void ma_srl(Register rd, Register rt, Register shift);
+ void ma_sra(Register rd, Register rt, Register shift);
+ void ma_ror(Register rd, Register rt, Register shift);
+ void ma_rol(Register rd, Register rt, Register shift);
+
+ // Negate
+ void ma_negu(Register rd, Register rs);
+
+ void ma_not(Register rd, Register rs);
+
+ // and
+ void ma_and(Register rd, Register rs);
+ void ma_and(Register rd, Imm32 imm);
+ void ma_and(Register rd, Register rs, Imm32 imm);
+
+ // or
+ void ma_or(Register rd, Register rs);
+ void ma_or(Register rd, Imm32 imm);
+ void ma_or(Register rd, Register rs, Imm32 imm);
+
+ // xor
+ void ma_xor(Register rd, Register rs);
+ void ma_xor(Register rd, Imm32 imm);
+ void ma_xor(Register rd, Register rs, Imm32 imm);
+
+ void ma_ctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // store
+ void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // arithmetic based ops
+ // add
+ void ma_addu(Register rd, Register rs, Imm32 imm);
+ void ma_addu(Register rd, Register rs);
+ void ma_addu(Register rd, Imm32 imm);
+ template <typename L>
+ void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
+ template <typename L>
+ void ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow);
+
+ // subtract
+ void ma_subu(Register rd, Register rs, Imm32 imm);
+ void ma_subu(Register rd, Register rs);
+ void ma_subu(Register rd, Imm32 imm);
+ void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+    // Multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rs, Imm32 imm);
+ void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
+ void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+    // Fast mod: uses scratch registers, and thus needs to be in the assembler.
+    // It implicitly assumes that we can overwrite dest at the beginning of the sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+ template <typename T>
+ void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
+ JumpKind jumpKind = LongJump);
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+ void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+ void ma_lis(FloatRegister dest, wasm::RawF32 value);
+ void ma_liNegZero(FloatRegister dest);
+
+ void ma_sd(FloatRegister fd, BaseIndex address);
+ void ma_ss(FloatRegister fd, BaseIndex address);
+
+    // FP branches
+ void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
+ JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) {
+ as_mtc1(src, dest);
+ }
+ void moveFromFloat32(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
+
+ private:
+ void atomicEffectOpMIPSr2(int nbytes, AtomicOp op, const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value, const Register& addr,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr, Register oldval,
+ Register newval, Register flagTemp, Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output);
+
+ protected:
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const Address& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const BaseIndex& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const Address& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+ void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const BaseIndex& address,
+ Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
+
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const Address& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+ void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
+ const BaseIndex& address, Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output);
+
+ void compareExchange(int nbytes, bool signExtend, const Address& address, Register oldval,
+ Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void compareExchange(int nbytes, bool signExtend, const BaseIndex& address, Register oldval,
+ Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+
+ void atomicExchange(int nbytes, bool signExtend, const Address& address, Register value,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+ void atomicExchange(int nbytes, bool signExtend, const BaseIndex& address, Register value,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ Register output);
+
+ public:
+ struct AutoPrepareForPatching {
+ explicit AutoPrepareForPatching(MacroAssemblerMIPSShared&) {}
+ };
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
new file mode 100644
index 000000000..f1e1fd514
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
@@ -0,0 +1,223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MoveEmitterMIPSShared::emit(const MoveResolver& moves)
+{
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ masm.reserveStack(moves.numCycles() * sizeof(double));
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++)
+ emit(moves.getMove(i));
+}
+
+Address
+MoveEmitterMIPSShared::cycleSlot(uint32_t slot, uint32_t subslot) const
+{
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t
+MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand)
+{
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer)
+ return operand.disp();
+
+ // Adjust offset if stack pointer has been moved.
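+    // For example, if 16 bytes have been pushed since the emitter started,
+    // an sp-relative disp() of 8 is now 24 bytes from the current sp.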
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address
+MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand)
+{
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+
+Register
+MoveEmitterMIPSShared::tempReg()
+{
+ spilledReg_ = SecondScratchReg;
+ return SecondScratchReg;
+}
+
+void
+MoveEmitterMIPSShared::emitMove(const MoveOperand& from, const MoveOperand& to)
+{
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg())
+ masm.movePtr(from.reg(), to.reg());
+ else if (to.isMemory())
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ else
+ MOZ_CRASH("Invalid emitMove arguments.");
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.loadPtr(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void
+MoveEmitterMIPSShared::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
+{
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg())
+ masm.move32(from.reg(), to.reg());
+ else if (to.isMemory())
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ else
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.load32(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void
+MoveEmitterMIPSShared::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
+{
+ // Ensure that we can use ScratchFloat32Reg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+            // This should only be used when passing a float parameter in a1, a2, or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+ // This should only be used when passing a float parameter in a1, a2 or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to));
+ }
+}
+
+void
+MoveEmitterMIPSShared::emit(const MoveOp& move)
+{
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPSShared::assertDone()
+{
+ MOZ_ASSERT(inCycle_ == 0);
+}
+
+void
+MoveEmitterMIPSShared::finish()
+{
+ assertDone();
+
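+ // Release any stack space the emitter reserved since pushedAtStart_.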
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.h b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
new file mode 100644
index 000000000..b7f794c53
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MoveEmitter_mips_shared_h
+#define jit_mips_shared_MoveEmitter_mips_shared_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPSShared
+{
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
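+ // Stack slot used by breakCycle/completeCycle to save a value while a
+ // cycle is being resolved.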
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ virtual void emitDoubleMove(const MoveOperand& from, const MoveOperand& to) = 0;
+ virtual void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ virtual void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterMIPSShared(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg)
+ { }
+ ~MoveEmitterMIPSShared() {
+ assertDone();
+ }
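+ // Emits all moves from |moves|; finish() then releases any stack space the
+ // emitter allocated along the way.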
+ void emit(const MoveResolver& moves);
+ void finish();
+
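+ // No-op on MIPS: the shared emitters use the dedicated scratch registers
+ // rather than a caller-provided one.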
+ void setScratchRegister(Register reg) {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MoveEmitter_mips_shared_h */
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
new file mode 100644
index 000000000..e665c92dd
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -0,0 +1,382 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineIC.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+inline void
+EmitRestoreTailCallReg(MacroAssembler& masm)
+{
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void
+EmitRepushTailCallReg(MacroAssembler& masm)
+{
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void
+EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm)
+{
+ // Move ICEntry offset into ICStubReg.
+ CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg);
+ *patchOffset = offset;
+
+ // Load stub pointer into ICStubReg.
+ masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+}
+
+inline void
+EmitEnterTypeMonitorIC(MacroAssembler& masm,
+ size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub())
+{
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry.
+ // R2 won't be active when we call ICs, so we can use it.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Jump to the stubcode.
+ masm.branch(R2.scratchReg());
+}
+
+inline void
+EmitReturnFromIC(MacroAssembler& masm)
+{
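+ // The return address stays in ra throughout the IC, so simply jump to it.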
+ masm.branch(ra);
+}
+
+inline void
+EmitChangeICReturnAddress(MacroAssembler& masm, Register reg)
+{
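+ // On MIPS the IC return address is kept in ra rather than on the stack.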
+ masm.movePtr(reg, ra);
+}
+
+inline void
+EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
+{
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subPtr(BaselineStackReg, scratch);
+
+ // Store frame size without VMFunction arguments for GC marking.
+ masm.subPtr(Imm32(argSize), scratch);
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+ masm.addPtr(Imm32(argSize), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ // ICTailCallReg (ra) already contains the return address (as we
+ // keep it there through the stub calls), but the VMWrapper code being
+ // called expects the return address to also be pushed on the stack.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+ masm.subPtr(Imm32(sizeof(CommonFrameLayout)), StackPointer);
+ masm.storePtr(scratch, Address(StackPointer, CommonFrameLayout::offsetOfDescriptor()));
+ masm.storePtr(ra, Address(StackPointer, CommonFrameLayout::offsetOfReturnAddress()));
+
+ masm.branch(target);
+}
+
+inline void
+EmitIonTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t stackSize)
+{
+ Register scratch = R2.scratchReg();
+
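+ // For tail calls, find the already pushed JitFrame_IonJS descriptor marking
+ // the end of the Ion frame, retrieve the frame size from it, and extend it
+ // to also cover the VM arguments and the stub frame.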
+ masm.loadPtr(Address(sp, stackSize), scratch);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch);
+ masm.addPtr(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch);
+
+ // Push frame descriptor and perform the tail call.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.push(ICTailCallReg);
+ masm.branch(target);
+}
+
+inline void
+EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize)
+{
+ // Compute stub frame size. We have to add two pointers: the stub reg and
+ // the previous frame pointer pushed by EmitBaselineEnterStubFrame.
+ masm.movePtr(BaselineFrameReg, reg);
+ masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg);
+ masm.subPtr(BaselineStackReg, reg);
+
+ masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
+}
+
+inline void
+EmitBaselineCallVM(JitCode* target, MacroAssembler& masm)
+{
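+ // Build and push the stub frame descriptor, then call into the VM wrapper.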
+ Register scratch = R2.scratchReg();
+ EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size());
+ masm.push(scratch);
+ masm.call(target);
+}
+
+inline void
+EmitIonCallVM(JitCode* target, size_t stackSlots, MacroAssembler& masm)
+{
+ uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonStub,
+ ExitFrameLayout::Size());
+ masm.Push(Imm32(descriptor));
+ masm.callJit(target);
+
+ // Remove the rest of the frame left on the stack, excluding the return
+ // address, which is implicitly popped when returning.
+ size_t framePop = sizeof(ExitFrameLayout) - sizeof(void*);
+
+ // Pop arguments from framePushed.
+ masm.implicitPop(stackSlots * sizeof(void*) + framePop);
+}
+
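+// Layout of the stub frame pushed by EmitBaselineEnterStubFrame, addressed
+// upwards from the stack pointer.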
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame);
+static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub);
+
+inline void
+EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+ // Compute frame size.
+ masm.movePtr(BaselineFrameReg, scratch);
+ masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch);
+ masm.subPtr(BaselineStackReg, scratch);
+
+ masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size());
+ masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+ masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor)));
+ masm.storePtr(ICTailCallReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, returnAddress)));
+
+ // Save the old stub reg and frame pointer, then point the frame pointer
+ // at the new stub frame.
+ masm.storePtr(ICStubReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedStub)));
+ masm.storePtr(BaselineFrameReg, Address(StackPointer,
+ offsetof(BaselineStubFrame, savedFrame)));
+ masm.movePtr(BaselineStackReg, BaselineFrameReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+inline void
+EmitIonEnterStubFrame(MacroAssembler& masm, Register scratch)
+{
+ MOZ_ASSERT(ICTailCallReg == ra);
+
+ // On MIPS the ra register holds the return address, but jit frames expect
+ // it to be on the stack. So push the link register here; it is logically
+ // part of the previous frame, which is why we use push instead of Push.
+ masm.push(ICTailCallReg);
+
+ masm.Push(ICStubReg);
+}
+
+inline void
+EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false)
+{
+ // Ion frames do not save and restore the frame pointer. If we called
+ // into Ion, we have to restore the stack pointer from the frame descriptor.
+ // If we performed a VM call, the descriptor has been popped already so
+ // in that case we use the frame pointer.
+ if (calledIntoIon) {
+ masm.pop(ScratchRegister);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister);
+ masm.addPtr(ScratchRegister, BaselineStackReg);
+ } else {
+ masm.movePtr(BaselineFrameReg, BaselineStackReg);
+ }
+
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)),
+ BaselineFrameReg);
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)),
+ ICStubReg);
+
+ // Load the return address.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)),
+ ICTailCallReg);
+
+ // Discard the frame descriptor.
+ masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister);
+ masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer);
+}
+
+inline void
+EmitIonLeaveStubFrame(MacroAssembler& masm)
+{
+ masm.Pop(ICStubReg);
+ masm.pop(ICTailCallReg); // See EmitIonEnterStubFrame for explanation on pop/Pop.
+}
+
+inline void
+EmitStowICValues(MacroAssembler& masm, int values)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
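+ // R0 is pushed first so that EmitUnstowICValues can pop the values back in
+ // reverse order.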
+ switch (values) {
+ case 1:
+ // Stow R0
+ masm.Push(R0);
+ break;
+ case 2:
+ // Stow R0 and R1
+ masm.Push(R0);
+ masm.Push(R1);
+ }
+}
+
+inline void
+EmitUnstowICValues(MacroAssembler& masm, int values, bool discard = false)
+{
+ MOZ_ASSERT(values >= 0 && values <= 2);
+ switch (values) {
+ case 1:
+ // Unstow R0.
+ if (discard)
+ masm.addPtr(Imm32(sizeof(Value)), BaselineStackReg);
+ else
+ masm.popValue(R0);
+ break;
+ case 2:
+ // Unstow R0 and R1.
+ if (discard) {
+ masm.addPtr(Imm32(sizeof(Value) * 2), BaselineStackReg);
+ } else {
+ masm.popValue(R1);
+ masm.popValue(R0);
+ }
+ break;
+ }
+ masm.adjustFrame(-values * sizeof(Value));
+}
+
+inline void
+EmitCallTypeUpdateIC(MacroAssembler& masm, JitCode* code, uint32_t objectOffset)
+{
+ // R0 contains the value that needs to be typechecked.
+ // The object we're updating is a boxed Value on the stack, at offset
+ // objectOffset from $sp, excluding the return address.
+
+ // Save the current ICStubReg and ICTailCallReg on the stack, since on
+ // MIPS $ra is live.
+ masm.subPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+ masm.storePtr(ICStubReg, Address(StackPointer, sizeof(intptr_t)));
+ masm.storePtr(ICTailCallReg, Address(StackPointer, 0));
+
+ // This is expected to be called from within an IC, when ICStubReg
+ // is properly initialized to point to the stub.
+ masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
+ ICStubReg);
+
+ // Load stubcode pointer from ICStubReg into R2.scratchReg().
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode.
+ masm.call(R2.scratchReg());
+
+ // Restore the old stub reg and tailcall reg.
+ masm.loadPtr(Address(StackPointer, 0), ICTailCallReg);
+ masm.loadPtr(Address(StackPointer, sizeof(intptr_t)), ICStubReg);
+ masm.addPtr(Imm32(2 * sizeof(intptr_t)), StackPointer);
+
+ // The update IC will store 0 or 1 in R1.scratchReg(), reflecting whether
+ // the value in R0 type-checked properly or not.
+ Label success;
+ masm.ma_b(R1.scratchReg(), Imm32(1), &success, Assembler::Equal, ShortJump);
+
+ // If the IC failed, then call the update fallback function.
+ EmitBaselineEnterStubFrame(masm, R1.scratchReg());
+
+ masm.loadValue(Address(BaselineStackReg, STUB_FRAME_SIZE + objectOffset), R1);
+
+ masm.Push(R0);
+ masm.Push(R1);
+ masm.Push(ICStubReg);
+
+ // Load previous frame pointer, push BaselineFrame*.
+ masm.loadPtr(Address(BaselineFrameReg, 0), R0.scratchReg());
+ masm.pushBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
+
+ EmitBaselineCallVM(code, masm);
+ EmitBaselineLeaveStubFrame(masm);
+
+ // Success at end.
+ masm.bind(&success);
+}
+
+template <typename AddrType>
+inline void
+EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type)
+{
+ // On MIPS, $ra is clobbered by patchableCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.patchableCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void
+EmitStubGuardFailure(MacroAssembler& masm)
+{
+ // NOTE: This routine assumes that the stub guard code left the stack in
+ // the same state it was in when it was entered.
+
+ // BaselineStubEntry points to the current stub.
+
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg);
+
+ // Load stubcode pointer from BaselineStubEntry into scratch register.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.branch(R2.scratchReg());
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_h */