author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/jit/mips32
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/mips32')
-rw-r--r--  js/src/jit/mips32/Architecture-mips32.cpp        102
-rw-r--r--  js/src/jit/mips32/Architecture-mips32.h          287
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.cpp           545
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.h             227
-rw-r--r--  js/src/jit/mips32/Bailouts-mips32.cpp             48
-rw-r--r--  js/src/jit/mips32/Bailouts-mips32.h               77
-rw-r--r--  js/src/jit/mips32/BaselineCompiler-mips32.cpp     16
-rw-r--r--  js/src/jit/mips32/BaselineCompiler-mips32.h       26
-rw-r--r--  js/src/jit/mips32/BaselineIC-mips32.cpp           45
-rw-r--r--  js/src/jit/mips32/CodeGenerator-mips32.cpp       832
-rw-r--r--  js/src/jit/mips32/CodeGenerator-mips32.h          96
-rw-r--r--  js/src/jit/mips32/LIR-mips32.h                   169
-rw-r--r--  js/src/jit/mips32/LOpcodes-mips32.h               25
-rw-r--r--  js/src/jit/mips32/Lowering-mips32.cpp            258
-rw-r--r--  js/src/jit/mips32/Lowering-mips32.h               57
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32-inl.h   1077
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.cpp     2365
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.h       1021
-rw-r--r--  js/src/jit/mips32/MoveEmitter-mips32.cpp         156
-rw-r--r--  js/src/jit/mips32/MoveEmitter-mips32.h            34
-rw-r--r--  js/src/jit/mips32/SharedIC-mips32.cpp            177
-rw-r--r--  js/src/jit/mips32/SharedICRegisters-mips32.h      44
-rw-r--r--  js/src/jit/mips32/Simulator-mips32.cpp          3519
-rw-r--r--  js/src/jit/mips32/Simulator-mips32.h             424
-rw-r--r--  js/src/jit/mips32/Trampoline-mips32.cpp         1418
25 files changed, 13045 insertions, 0 deletions
diff --git a/js/src/jit/mips32/Architecture-mips32.cpp b/js/src/jit/mips32/Architecture-mips32.cpp
new file mode 100644
index 000000000..9aca3f831
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.cpp
@@ -0,0 +1,102 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Architecture-mips32.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char * const Registers::RegNames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" };
+
+const uint32_t Allocatable = 14;
+
+const Registers::SetType Registers::ArgRegMask = Registers::SharedArgRegMask;
+
+const Registers::SetType Registers::JSCallMask =
+ (1 << Registers::a2) |
+ (1 << Registers::a3);
+
+const Registers::SetType Registers::CallMask =
+ (1 << Registers::v0) |
+ (1 << Registers::v1); // used for double-size returns
+
+FloatRegisters::Code
+FloatRegisters::FromName(const char* name)
+{
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0)
+ return Code(i);
+ }
+
+ return Invalid;
+}
+
+FloatRegister
+FloatRegister::doubleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Double)
+ return FloatRegister(code_ & ~1, Double);
+ return *this;
+}
+
+FloatRegister
+FloatRegister::singleOverlay(unsigned int which) const
+{
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Double) {
+ // Only even registers are double
+ MOZ_ASSERT(code_ % 2 == 0);
+ MOZ_ASSERT(which < 2);
+ return FloatRegister(code_ + which, Single);
+ }
+ MOZ_ASSERT(which == 0);
+ return FloatRegister(code_, Single);
+}
+
+FloatRegisterSet
+FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+ // Even for single-precision registers, save the complete double register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t
+FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xffffffff) == 0);
+ uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
+
+uint32_t
+FloatRegister::getRegisterDumpOffsetInBytes()
+{
+ if (isSingle())
+ return id() * sizeof(float);
+ if (isDouble())
+ return id() * sizeof(double);
+ MOZ_CRASH();
+}
+
+} // namespace jit
+} // namespace js
+
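A note on the push logic above: ReduceSetForPush widens every single register to its containing double, so GetPushSizeInBytes only has to count the double bits, which live in the upper 32 bits of the register set (see AllDoubleMask in the header below). A minimal standalone sketch of that computation, using std::bitset in place of mozilla::CountPopulation32 and a plain uint64_t standing in for FloatRegisters::SetType:

    #include <bitset>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for FloatRegisters::SetType: single registers occupy bits 0-31,
    // double registers occupy bits 32-63 (even codes only).
    using SetType = uint64_t;

    static uint32_t PushSizeInBytes(SetType reducedSet) {
        // After ReduceSetForPush, only double bits (the upper half) remain set.
        uint32_t doubles = uint32_t(reducedSet >> 32);
        return uint32_t(std::bitset<32>(doubles).count() * sizeof(double));
    }

    int main() {
        SetType s = (1ULL << (32 + 0)) | (1ULL << (32 + 20)); // f0 and f20, double
        std::printf("%u\n", PushSizeInBytes(s));              // prints 16
    }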
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
new file mode 100644
index 000000000..9e5f3ca28
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Architecture_mips32_h
+#define jit_mips32_Architecture_mips32_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// The O32 ABI requires the caller to reserve stack space for the four
+// argument registers.
+static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+// Size of MIPS32 general purpose registers is 32 bits.
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+
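The two offsets above describe the nunbox layout of a js::Value on this target: the 32-bit payload sits at offset 0 and the 32-bit type tag at offset 4, which implies a little-endian build. A minimal illustrative sketch (the struct is a hypothetical stand-in, not the real js::Value declaration):

    #include <cstddef>
    #include <cstdint>

    // Illustrative nunbox layout only; the real js::Value packs these
    // differently but exposes the same component offsets.
    struct NunboxedValue {
        uint32_t payload; // matches NUNBOX32_PAYLOAD_OFFSET == 0
        uint32_t tag;     // matches NUNBOX32_TYPE_OFFSET == 4
    };

    static_assert(offsetof(NunboxedValue, payload) == 0, "payload at offset 0");
    static_assert(offsetof(NunboxedValue, tag) == 4, "type tag at offset 4");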
+// Size of each bailout table entry.
+// For MIPS this is a two-instruction relative call.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 2 * sizeof(void*);
+
+// MIPS32 can have two types of floating-point coprocessors:
+// - 32-bit floating-point coprocessor - In this case, there are 32 single
+// precision registers, and pairs of even and odd float registers are used as
+// double precision registers. Example: f0 (double) is composed of
+// f0 and f1 (single).
+// - 64-bit floating-point coprocessor - In this case, there are 32 double
+// precision registers which can also be used as single precision registers.
+
+// When using the O32 ABI, the floating-point coprocessor is 32-bit.
+// When using the N32 ABI, the floating-point coprocessor is 64-bit.
+class FloatRegisters : public FloatRegistersMIPSShared
+{
+ public:
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < Total);
+ return FloatRegistersMIPSShared::GetName(Code(i % 32));
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t Total = 64;
+ static const uint32_t TotalDouble = 16;
+ static const uint32_t RegisterIdLimit = 32;
+ // Workaround: on Loongson CPUs the odd FP registers behave differently
+ // in fp32 mode than on standard MIPS.
+#if defined(_MIPS_ARCH_LOONGSON3A)
+ static const uint32_t TotalSingle = 16;
+ static const uint32_t Allocatable = 28;
+ static const SetType AllSingleMask = 0x55555555ULL;
+#else
+ static const uint32_t TotalSingle = 32;
+ static const uint32_t Allocatable = 42;
+ static const SetType AllSingleMask = (1ULL << 32) - 1;
+#endif
+ // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 16;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+ static const SetType AllDoubleMask = 0x55555555ULL << 32;
+ static const SetType AllMask = AllDoubleMask | AllSingleMask;
+
+ static const SetType NonVolatileDoubleMask =
+ ((1ULL << FloatRegisters::f20) |
+ (1ULL << FloatRegisters::f22) |
+ (1ULL << FloatRegisters::f24) |
+ (1ULL << FloatRegisters::f26) |
+ (1ULL << FloatRegisters::f28) |
+ (1ULL << FloatRegisters::f30)) << 32;
+
+ // f20-single and f21-single alias f20-double ...
+ static const SetType NonVolatileMask =
+ NonVolatileDoubleMask |
+ (1ULL << FloatRegisters::f20) |
+ (1ULL << FloatRegisters::f21) |
+ (1ULL << FloatRegisters::f22) |
+ (1ULL << FloatRegisters::f23) |
+ (1ULL << FloatRegisters::f24) |
+ (1ULL << FloatRegisters::f25) |
+ (1ULL << FloatRegisters::f26) |
+ (1ULL << FloatRegisters::f27) |
+ (1ULL << FloatRegisters::f28) |
+ (1ULL << FloatRegisters::f29) |
+ (1ULL << FloatRegisters::f30) |
+ (1ULL << FloatRegisters::f31);
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+ static const SetType VolatileDoubleMask = AllDoubleMask & ~NonVolatileDoubleMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ static const SetType NonAllocatableDoubleMask =
+ ((1ULL << FloatRegisters::f16) |
+ (1ULL << FloatRegisters::f18)) << 32;
+ // f16-single and f17-single alias f16-double ...
+ static const SetType NonAllocatableMask =
+ NonAllocatableDoubleMask |
+ (1ULL << FloatRegisters::f16) |
+ (1ULL << FloatRegisters::f17) |
+ (1ULL << FloatRegisters::f18) |
+ (1ULL << FloatRegisters::f19);
+
+ // Registers that can be allocated without being saved, generally.
+ static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+class FloatRegister : public FloatRegisterMIPSShared
+{
+ public:
+ enum RegType {
+ Single = 0x0,
+ Double = 0x1,
+ };
+
+ typedef FloatRegisters Codes;
+ typedef Codes::Code Code;
+ typedef Codes::Encoding Encoding;
+
+ uint32_t code_ : 6;
+ protected:
+ RegType kind_ : 1;
+
+ public:
+ constexpr FloatRegister(uint32_t code, RegType kind = Double)
+ : code_ (Code(code)), kind_(kind)
+ { }
+ constexpr FloatRegister()
+ : code_(Code(FloatRegisters::invalid_freg)), kind_(Double)
+ { }
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && code_ == other.code_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+ size_t size() const { return (kind_ == Double) ? 8 : 4; }
+ bool isInvalid() const {
+ return code_ == FloatRegisters::invalid_freg;
+ }
+
+ bool isSingle() const { return kind_ == Single; }
+ bool isDouble() const { return kind_ == Double; }
+
+ FloatRegister doubleOverlay(unsigned int which = 0) const;
+ FloatRegister singleOverlay(unsigned int which = 0) const;
+ FloatRegister sintOverlay(unsigned int which = 0) const;
+ FloatRegister uintOverlay(unsigned int which = 0) const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ return Code(code_ | (kind_ << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ return Encoding(code_);
+ }
+ uint32_t id() const {
+ return code_;
+ }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 31;
+ uint32_t kind = i >> 5;
+ return FloatRegister(code, RegType(kind));
+ }
+ // This is similar to FromCode except for double registers on O32.
+ static FloatRegister FromIndex(uint32_t index, RegType kind) {
+#if defined(USES_O32_ABI)
+ // Only even FP registers are available for Loongson on O32.
+# if defined(_MIPS_ARCH_LOONGSON3A)
+ return FloatRegister(index * 2, kind);
+# else
+ if (kind == Double)
+ return FloatRegister(index * 2, kind);
+# endif
+#endif
+ return FloatRegister(index, kind);
+ }
+
+ bool volatile_() const {
+ if (isDouble())
+ return !!((1ULL << code_) & FloatRegisters::VolatileMask);
+ return !!((1ULL << (code_ & ~1)) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const {
+ return FloatRegisters::GetName(code_);
+ }
+ bool operator != (const FloatRegister& other) const {
+ return other.kind_ != kind_ || code_ != other.code_;
+ }
+ bool aliases(const FloatRegister& other) {
+ if (kind_ == other.kind_)
+ return code_ == other.code_;
+ return doubleOverlay() == other.doubleOverlay();
+ }
+ uint32_t numAliased() const {
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ return 3;
+ }
+ return 2;
+ }
+ void aliased(uint32_t aliasIdx, FloatRegister* ret) {
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ MOZ_ASSERT(aliasIdx <= 2);
+ *ret = singleOverlay(aliasIdx - 1);
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = doubleOverlay(aliasIdx - 1);
+ }
+ uint32_t numAlignedAliased() const {
+ if (isDouble()) {
+ MOZ_ASSERT((code_ & 1) == 0);
+ return 2;
+ }
+ // f1-float32 has 0 other aligned aliases, 1 total.
+ // f0-float32 has 1 other aligned alias, 2 total.
+ return 2 - (code_ & 1);
+ }
+ // | f0-double |
+ // | f0-float32 | f1-float32 |
+ // We only push double registers on MIPS, so if we've stored f0-double,
+ // f0-float32 is stored there as well.
+ void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) {
+ MOZ_ASSERT(isDouble());
+ MOZ_ASSERT((code_ & 1) == 0);
+ if (aliasIdx == 0) {
+ *ret = *this;
+ return;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ *ret = singleOverlay(aliasIdx - 1);
+ }
+
+ SetType alignedOrDominatedAliasedSet() const {
+ if (isSingle())
+ return SetType(1) << code_;
+
+ MOZ_ASSERT(isDouble());
+ return SetType(0b11) << code_;
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+// In order to handle functions such as int(*)(int, double), where the first
+// argument is passed in a general purpose register and the second would go in
+// a floating point register, we have to store the double's content into two
+// general purpose registers, namely a2 and a3.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Architecture_mips32_h */
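A quick round-trip of the packing used by FloatRegister::code() and FromCode() above: the register number occupies the low five bits and the kind occupies bit 5. A self-contained sketch with plain functions standing in for the class methods:

    #include <cassert>
    #include <cstdint>

    enum RegType { Single = 0x0, Double = 0x1 };

    // Pack: register id in bits 0-4, kind in bit 5 (mirrors FloatRegister::code()).
    static uint32_t PackCode(uint32_t id, RegType kind) {
        return id | (uint32_t(kind) << 5);
    }

    // Unpack: mirrors FloatRegister::FromCode().
    static void UnpackCode(uint32_t code, uint32_t* id, RegType* kind) {
        *id = code & 31;
        *kind = RegType(code >> 5);
    }

    int main() {
        uint32_t id;
        RegType kind;
        UnpackCode(PackCode(18, Double), &id, &kind);
        assert(id == 18 && kind == Double);
    }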
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
new file mode 100644
index 000000000..6283c1d5a
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -0,0 +1,545 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : usedArgSlots_(0),
+ firstArgFloatSize_(0),
+ useGPRForFloats_(false),
+ current_()
+{}
+
+ABIArg
+ABIArgGenerator::next(MIRType type)
+{
+ Register destReg;
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Pointer:
+ if (GetIntArgReg(usedArgSlots_, &destReg))
+ current_ = ABIArg(destReg);
+ else
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_++;
+ break;
+ case MIRType::Int64:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(a0, a1);
+ usedArgSlots_ = 2;
+ } else if (usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ case MIRType::Float32:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12.asSingle());
+ firstArgFloatSize_ = 1;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14.asSingle());
+ } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) {
+ current_ = ABIArg(destReg);
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ }
+ usedArgSlots_++;
+ break;
+ case MIRType::Double:
+ if (!usedArgSlots_) {
+ current_ = ABIArg(f12);
+ usedArgSlots_ = 2;
+ firstArgFloatSize_ = 2;
+ } else if (usedArgSlots_ == firstArgFloatSize_) {
+ current_ = ABIArg(f14);
+ usedArgSlots_ = 4;
+ } else if (useGPRForFloats_ && usedArgSlots_ <= 2) {
+ current_ = ABIArg(a2, a3);
+ usedArgSlots_ = 4;
+ } else {
+ if (usedArgSlots_ < NumIntArgRegs)
+ usedArgSlots_ = NumIntArgRegs;
+ usedArgSlots_ += usedArgSlots_ % 2;
+ current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+ usedArgSlots_ += 2;
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
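As a worked example of next() above: for int(*)(int, double) with enforceO32ABI() in effect, the Int32 lands in a0 and the Double takes the useGPRForFloats_ branch into the (a2, a3) pair, matching the JS_CODEGEN_REGISTER_PAIR comment in Architecture-mips32.h. A simplified standalone model of that slot accounting (it drops the firstArgFloatSize_/f14 case and the stack-offset math, and uses plain strings for register names):

    #include <cstdio>

    // Simplified model of the O32 slot accounting in ABIArgGenerator::next().
    struct MiniO32Gen {
        unsigned usedArgSlots = 0;
        bool useGPRForFloats = false; // what enforceO32ABI() turns on

        const char* nextInt32() {
            static const char* argRegs[] = { "a0", "a1", "a2", "a3" };
            if (usedArgSlots < 4)
                return argRegs[usedArgSlots++];
            usedArgSlots++;
            return "stack";
        }

        const char* nextDouble() {
            if (usedArgSlots == 0) {
                usedArgSlots = 2;    // f12 consumes the first two slots
                return "f12";
            }
            if (useGPRForFloats && usedArgSlots <= 2) {
                usedArgSlots = 4;    // the GPR pair consumes slots up to a3
                return "(a2,a3)";
            }
            return "stack";
        }
    };

    int main() {
        MiniO32Gen gen;
        gen.useGPRForFloats = true;
        const char* first = gen.nextInt32();   // a0
        const char* second = gen.nextDouble(); // (a2,a3)
        std::printf("%s %s\n", first, second);
    }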
+uint32_t
+js::jit::RT(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RTShift;
+}
+
+uint32_t
+js::jit::RD(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RDShift;
+}
+
+uint32_t
+js::jit::RZ(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << RZShift;
+}
+
+uint32_t
+js::jit::SA(FloatRegister r)
+{
+ MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+ return r.id() << SAShift;
+}
+
+// Used to patch jumps created by MacroAssemblerMIPSCompat::jumpWithPatch.
+void
+jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
+{
+ Instruction* inst1 = (Instruction*)jump_.raw();
+ Instruction* inst2 = inst1->next();
+
+ MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
+ Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
+
+ AutoFlushICache::flush(uintptr_t(inst1), 8);
+}
+
+// For more information about backedges, look at the comment in
+// MacroAssemblerMIPSCompat::backedgeJump().
+void
+jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
+ JitRuntime::BackedgeTarget target)
+{
+ uint32_t sourceAddr = (uint32_t)jump.raw();
+ uint32_t targetAddr = (uint32_t)label.raw();
+ InstImm* branch = (InstImm*)jump.raw();
+
+ MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift));
+
+ if (BOffImm16::IsInRange(targetAddr - sourceAddr)) {
+ branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr));
+ } else {
+ if (target == JitRuntime::BackedgeLoopHeader) {
+ Instruction* lui = &branch[1];
+ Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+ // Jump to ori. The lui will be executed in the delay slot.
+ branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
+ } else {
+ Instruction* lui = &branch[4];
+ Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+ branch->setBOffImm16(BOffImm16(4 * sizeof(uint32_t)));
+ }
+ }
+}
+
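PatchBackedge chooses between the short and long form based on BOffImm16::IsInRange. A hedged sketch of what that reachability test amounts to, assuming the usual MIPS encoding of a signed 16-bit word offset (the exact bias and limits belong to BOffImm16 itself):

    #include <cstdint>
    #include <cstdio>

    // A MIPS branch encodes a signed 16-bit word offset, so it can reach
    // roughly +/-128 KB from the branch site.
    static bool ShortBranchInRange(int32_t byteOffset) {
        if (byteOffset % 4 != 0)   // branches target instruction boundaries
            return false;
        int32_t words = byteOffset / 4;
        return words >= INT16_MIN && words <= INT16_MAX;
    }

    int main() {
        std::printf("%d\n", ShortBranchInRange(64));      // 1: short branch fits
        std::printf("%d\n", ShortBranchInRange(1 << 20)); // 0: needs the long path
    }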
+void
+Assembler::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all long jumps during code copy.
+ for (size_t i = 0; i < longJumps_.length(); i++) {
+ Instruction* inst1 = (Instruction*) ((uint32_t)buffer + longJumps_[i]);
+
+ uint32_t value = Assembler::ExtractLuiOriValue(inst1, inst1->next());
+ Assembler::UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
+ }
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
+uintptr_t
+Assembler::GetPointer(uint8_t* instPtr)
+{
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+static JitCode*
+CodeFromJump(Instruction* jump)
+{
+ uint8_t* target = (uint8_t*)Assembler::ExtractLuiOriValue(jump, jump->next());
+ return JitCode::FromExecutable(target);
+}
+
+void
+Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ JitCode* child = CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void
+TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
+{
+ void* ptr = (void*)Assembler::ExtractLuiOriValue(inst, inst->next());
+ void* prior = ptr;
+
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
+ "ion-masm-ptr");
+ if (ptr != prior) {
+ Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
+ AutoFlushICache::flush(uintptr_t(inst), 8);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(buffer + offset);
+ TraceOneDataRelocation(trc, inst);
+ }
+}
+
+static void
+TraceDataRelocations(JSTracer* trc, MIPSBuffer* buffer, CompactBufferReader& reader)
+{
+ while (reader.more()) {
+ BufferOffset bo (reader.readUnsigned());
+ MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer);
+ TraceOneDataRelocation(trc, iter.cur());
+ }
+}
+
+void
+Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
+{
+ ::TraceDataRelocations(trc, code->raw(), reader);
+}
+
+Assembler::Condition
+Assembler::UnsignedCondition(Condition cond)
+{
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return cond;
+ case LessThan:
+ case Below:
+ return Below;
+ case LessThanOrEqual:
+ case BelowOrEqual:
+ return BelowOrEqual;
+ case GreaterThan:
+ case Above:
+ return Above;
+ case AboveOrEqual:
+ case GreaterThanOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+Assembler::Condition
+Assembler::ConditionWithoutEqual(Condition cond)
+{
+ switch (cond) {
+ case LessThan:
+ case LessThanOrEqual:
+ return LessThan;
+ case Below:
+ case BelowOrEqual:
+ return Below;
+ case GreaterThan:
+ case GreaterThanOrEqual:
+ return GreaterThan;
+ case Above:
+ case AboveOrEqual:
+ return Above;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void
+Assembler::trace(JSTracer* trc)
+{
+ for (size_t i = 0; i < jumps_.length(); i++) {
+ RelativePatch& rp = jumps_[i];
+ if (rp.kind == Relocation::JITCODE) {
+ JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target);
+ TraceManuallyBarrieredEdge(trc, &code, "masmrel32");
+ MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target));
+ }
+ }
+ if (dataRelocations_.length()) {
+ CompactBufferReader reader(dataRelocations_);
+ ::TraceDataRelocations(trc, &m_buffer, reader);
+ }
+}
+
+void
+Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
+{
+ if (label->bound()) {
+ intptr_t offset = label->offset();
+ Instruction* inst = (Instruction*) (rawCode + offset);
+ Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
+ }
+}
+
+void
+Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
+{
+ int32_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // If the encoded offset is 4, then the jump must be short
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+ // Generate the long jump for calls because the return address has to be
+ // the address after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ // Skip the trailing nops in conditional branches.
+ if (conditional) {
+ inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*))).encode();
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(void*)));
+ Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ // There is 1 nop after this.
+ }
+}
+
+void
+Assembler::bind(RepatchLabel* label)
+{
+ BufferOffset dest = nextOffset();
+ if (label->used() && !oom()) {
+ // If the label has a use, then change this use to refer to
+ // the bound label.
+ BufferOffset b(label->offset());
+ InstImm* inst = (InstImm*)editSrc(b);
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ uint32_t offset = dest.getOffset() - label->offset();
+
+ // If first instruction is lui, then this is a long jump.
+ // If second instruction is lui, then this is a loop backedge.
+ if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
+ // For unconditional long branches generated by ma_liPatchable,
+ // such as those created by jumpWithPatch.
+ Assembler::UpdateLuiOriValue(inst, inst->next(), dest.getOffset());
+ } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
+ BOffImm16::IsInRange(offset))
+ {
+ // Handle code produced by:
+ // backedgeJump
+ // branchWithCode
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ } else if (inst[0].encode() == inst_beq.encode()) {
+ // Handle open long unconditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset()));
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, dest.getOffset());
+ inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ } else {
+ // Handle open long conditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ // No need for a "nop" here because we can clobber scratch.
+ // We need to add it to long jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ MOZ_ASSERT(inst[4].encode() == NopInst);
+ addLongJump(BufferOffset(label->offset() + sizeof(void*)));
+ Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, dest.getOffset());
+ inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ }
+ }
+ label->bind(dest.getOffset());
+}
+
+uint32_t
+Assembler::PatchWrite_NearCallSize()
+{
+ return 4 * sizeof(uint32_t);
+}
+
+void
+Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction* inst = (Instruction*) start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use a long jump, for two reasons:
+ // - The jump has to be the same size because of PatchWrite_NearCallSize.
+ // - The return address has to be at the end of the replaced block.
+ // A short jump wouldn't be more efficient.
+ Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
+ inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[3] = InstNOP();
+
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
+}
+
+uint32_t
+Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
+{
+ InstImm* i0 = (InstImm*) inst0;
+ InstImm* i1 = (InstImm*) inst1;
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ uint32_t value = i0->extractImm16Value() << 16;
+ value = value | i1->extractImm16Value();
+ return value;
+}
+
+void
+Assembler::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
+{
+ MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ ((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
+ ((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+void
+Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
+ Register reg, uint32_t value)
+{
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
+
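The three helpers above treat a lui/ori pair as a patchable 32-bit constant: lui installs the upper halfword and ori the lower (ori zero-extends, so no carry adjustment is needed). A self-contained sketch of the split and reassembly that Write/Extract/UpdateLuiOriValue perform on the instruction immediates:

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t value = 0x12345678;
        // Split, as WriteLuiOriInstructions encodes it:
        uint16_t luiImm = uint16_t(value >> 16);     // Imm16::Upper -> 0x1234
        uint16_t oriImm = uint16_t(value & 0xffff);  // Imm16::Lower -> 0x5678
        // Reassemble, as ExtractLuiOriValue decodes it:
        uint32_t rebuilt = (uint32_t(luiImm) << 16) | oriImm;
        assert(rebuilt == value);
    }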
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue)
+{
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void
+Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue)
+{
+ Instruction* inst = (Instruction*) label.raw();
+
+ // Extract old Value
+ DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
+ MOZ_ASSERT(value == uint32_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
+
+ AutoFlushICache::flush(uintptr_t(inst), 8);
+}
+
+void
+Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
+{
+ InstImm* inst = (InstImm*)code;
+ Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
+}
+
+uint32_t
+Assembler::ExtractInstructionImmediate(uint8_t* code)
+{
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+void
+Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
+{
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+ Instruction* i2 = (Instruction*) i1->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i2 = jalr;
+ } else {
+ InstNOP nop;
+ *i2 = nop;
+ }
+
+ AutoFlushICache::flush(uintptr_t(i2), 4);
+}
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
new file mode 100644
index 000000000..9fdbcda98
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Assembler_mips32_h
+#define jit_mips32_Assembler_mips32_h
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips32/Architecture-mips32.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+
+static constexpr Register CallTempNonArgRegs[] = { t0, t1, t2, t3, t4 };
+static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs);
+
+class ABIArgGenerator
+{
+ unsigned usedArgSlots_;
+ unsigned firstArgFloatSize_;
+ // Note: This is not compliant with the system ABI. The Lowering phase
+ // expects to lower an MWasmParameter to only one register.
+ bool useGPRForFloats_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ void enforceO32ABI() {
+ useGPRForFloats_ = true;
+ }
+
+ uint32_t stackBytesConsumedSoFar() const {
+ if (usedArgSlots_ <= 4)
+ return ShadowStackSpace;
+
+ return usedArgSlots_ * sizeof(intptr_t);
+ }
+};
+
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = s5;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg1;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg2;
+
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register64 ReturnReg64(InvalidReg, InvalidReg);
+static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::f0, FloatRegister::Single };
+static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegister::Double };
+static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f18, FloatRegister::Single };
+static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f18, FloatRegister::Double };
+static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f16, FloatRegister::Single };
+static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, FloatRegister::Double };
+
+// Registers used in the GenerateFFIIonExit Disable Activation block.
+// None of these may be the second scratch register (t8).
+static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
+static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
+
+static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegister::Double };
+static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegister::Double };
+static constexpr FloatRegister f4 = { FloatRegisters::f4, FloatRegister::Double };
+static constexpr FloatRegister f6 = { FloatRegisters::f6, FloatRegister::Double };
+static constexpr FloatRegister f8 = { FloatRegisters::f8, FloatRegister::Double };
+static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegister::Double };
+static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegister::Double };
+static constexpr FloatRegister f14 = { FloatRegisters::f14, FloatRegister::Double };
+static constexpr FloatRegister f16 = { FloatRegisters::f16, FloatRegister::Double };
+static constexpr FloatRegister f18 = { FloatRegisters::f18, FloatRegister::Double };
+static constexpr FloatRegister f20 = { FloatRegisters::f20, FloatRegister::Double };
+static constexpr FloatRegister f22 = { FloatRegisters::f22, FloatRegister::Double };
+static constexpr FloatRegister f24 = { FloatRegisters::f24, FloatRegister::Double };
+static constexpr FloatRegister f26 = { FloatRegisters::f26, FloatRegister::Double };
+static constexpr FloatRegister f28 = { FloatRegisters::f28, FloatRegister::Double };
+static constexpr FloatRegister f30 = { FloatRegisters::f30, FloatRegister::Double };
+
+// MIPS CPUs can only load multibyte data that is "naturally"
+// four-byte-aligned, sp register should be eight-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 8;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+
+// Does this architecture support SIMD conversions between Uint32x4 and Float32x4?
+static constexpr bool SupportsUint32x4FloatConversions = false;
+
+// Does this architecture support comparisons of unsigned integer vectors?
+static constexpr bool SupportsUint8x16Compares = false;
+static constexpr bool SupportsUint16x8Compares = false;
+static constexpr bool SupportsUint32x4Compares = false;
+
+static constexpr Scale ScalePointer = TimesFour;
+
+class Assembler : public AssemblerMIPSShared
+{
+ public:
+ Assembler()
+ : AssemblerMIPSShared()
+ { }
+
+ static Condition UnsignedCondition(Condition cond);
+ static Condition ConditionWithoutEqual(Condition cond);
+
+ // MacroAssemblers hold onto gcthings, so they are traced by the GC.
+ void trace(JSTracer* trc);
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ protected:
+ // This is used to access the odd register from the pair of single
+ // precision registers that make up one double register.
+ FloatRegister getOddPair(FloatRegister reg) {
+ MOZ_ASSERT(reg.isDouble());
+ return reg.singleOverlay(1);
+ }
+
+ public:
+ using AssemblerMIPSShared::bind;
+
+ void bind(RepatchLabel* label);
+ void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
+ static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
+ static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
+ Register reg, uint32_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
+ static uint32_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 4;
+
+static inline bool
+GetIntArgReg(uint32_t usedArgSlots, Register* out)
+{
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool
+GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
+{
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out))
+ return true;
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs)
+ return false;
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+static inline uint32_t
+GetArgStackDisp(uint32_t usedArgSlots)
+{
+ MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
+ // Even register arguments have space reserved on the stack.
+ return usedArgSlots * sizeof(intptr_t);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Assembler_mips32_h */
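GetTempRegForIntArg above hands out a0-a3 first and then falls back to the five CallTempNonArgRegs, so it can supply up to nine registers before failing. A standalone model of that two-level allocation (register names as strings rather than the real Register type):

    #include <cstdio>

    // Standalone model of GetTempRegForIntArg: a0-a3 first, then the five
    // CallTempNonArgRegs (t0-t4), then failure.
    static bool TempRegForIntArg(unsigned usedIntArgs, const char** out) {
        static const char* argRegs[] = { "a0", "a1", "a2", "a3" };
        static const char* tempRegs[] = { "t0", "t1", "t2", "t3", "t4" };
        if (usedIntArgs < 4) {
            *out = argRegs[usedIntArgs];
            return true;
        }
        usedIntArgs -= 4;
        if (usedIntArgs >= 5)
            return false;
        *out = tempRegs[usedIntArgs];
        return true;
    }

    int main() {
        const char* reg;
        for (unsigned i = 0; TempRegForIntArg(i, &reg); i++)
            std::printf("arg %u -> %s\n", i, reg); // a0..a3, then t0..t4
    }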
diff --git a/js/src/jit/mips32/Bailouts-mips32.cpp b/js/src/jit/mips32/Bailouts-mips32.cpp
new file mode 100644
index 000000000..1b92d729c
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.cpp
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Bailouts-mips32.h"
+
+#include "jscntxt.h"
+#include "jscompartment.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine())
+{
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());
+
+ MOZ_ASSERT(tableOffset >= tableStart &&
+ tableOffset < tableStart + code->instructionsSize());
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId = ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
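The bailout-ID computation above works because the return address pushed by a bailout-table entry points just past that entry, and every entry is BAILOUT_TABLE_ENTRY_SIZE (eight) bytes. A worked sketch of that arithmetic with a hypothetical table base:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uintptr_t entrySize  = 8;          // BAILOUT_TABLE_ENTRY_SIZE on MIPS32
        const uintptr_t tableStart = 0x40001000; // hypothetical code->raw()
        // ra pushed by entry #2 points just past its entry, i.e. 3 entries in.
        uintptr_t tableOffset = tableStart + 3 * entrySize;
        uint32_t bailoutId = ((tableOffset - tableStart) / entrySize) - 1;
        assert(bailoutId == 2);
    }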
diff --git a/js/src/jit/mips32/Bailouts-mips32.h b/js/src/jit/mips32/Bailouts-mips32.h
new file mode 100644
index 000000000..0c4d7f313
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Bailouts_mips32_h
+#define jit_mips32_Bailouts_mips32_h
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+
+namespace js {
+namespace jit {
+
+class BailoutStack
+{
+ uintptr_t frameClassId_;
+ // This is pushed in the bailout handler. Both entry points into the
+ // handler insert their own value into lr, which is then placed onto the
+ // stack along with frameClassId_ above. This should be migrated to ip.
+ public:
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+
+ protected:
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+
+ uintptr_t snapshotOffset_;
+ uintptr_t padding_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None())
+ return frameSize_;
+ return frameClass().frameSize();
+ }
+ MachineState machine() {
+ return MachineState::FromBailout(regs_, fpregs_);
+ }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None())
+ return (uint8_t*)this + sizeof(BailoutStack);
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+ static size_t offsetOfFrameClass() {
+ return offsetof(BailoutStack, frameClassId_);
+ }
+ static size_t offsetOfFrameSize() {
+ return offsetof(BailoutStack, frameSize_);
+ }
+ static size_t offsetOfFpRegs() {
+ return offsetof(BailoutStack, fpregs_);
+ }
+ static size_t offsetOfRegs() {
+ return offsetof(BailoutStack, regs_);
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Bailouts_mips32_h */
diff --git a/js/src/jit/mips32/BaselineCompiler-mips32.cpp b/js/src/jit/mips32/BaselineCompiler-mips32.cpp
new file mode 100644
index 000000000..acbc67ff0
--- /dev/null
+++ b/js/src/jit/mips32/BaselineCompiler-mips32.cpp
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/BaselineCompiler-mips32.h"
+
+using namespace js;
+using namespace js::jit;
+
+BaselineCompilerMIPS::BaselineCompilerMIPS(JSContext* cx, TempAllocator& alloc,
+ JSScript* script)
+ : BaselineCompilerMIPSShared(cx, alloc, script)
+{
+}
diff --git a/js/src/jit/mips32/BaselineCompiler-mips32.h b/js/src/jit/mips32/BaselineCompiler-mips32.h
new file mode 100644
index 000000000..cd6fe41ee
--- /dev/null
+++ b/js/src/jit/mips32/BaselineCompiler-mips32.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_BaselineCompiler_mips32_h
+#define jit_mips32_BaselineCompiler_mips32_h
+
+#include "jit/mips-shared/BaselineCompiler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class BaselineCompilerMIPS : public BaselineCompilerMIPSShared
+{
+ protected:
+ BaselineCompilerMIPS(JSContext* cx, TempAllocator& alloc, JSScript* script);
+};
+
+typedef BaselineCompilerMIPS BaselineCompilerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_BaselineCompiler_mips32_h */
diff --git a/js/src/jit/mips32/BaselineIC-mips32.cpp b/js/src/jit/mips32/BaselineIC-mips32.cpp
new file mode 100644
index 000000000..e41ecf774
--- /dev/null
+++ b/js/src/jit/mips32/BaselineIC-mips32.cpp
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICCompare_Int32
+
+bool
+ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ Label conditionTrue;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+ // Compare payload regs of R0 and R1.
+ Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
+ masm.ma_cmp_set(R0.payloadReg(), R0.payloadReg(), R1.payloadReg(), cond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.payloadReg(), R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
new file mode 100644
index 000000000..b947c14aa
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -0,0 +1,832 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/CodeGenerator-mips32.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS>
+{
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorMIPS* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir)
+ : mir_(mir)
+ {}
+
+ MTableSwitch* mir() const {
+ return mir_;
+ }
+
+ CodeLabel* jumpLabel() {
+ return &jumpLabel_;
+ }
+};
+
+void
+CodeGeneratorMIPS::visitOutOfLineBailout(OutOfLineBailout* ool)
+{
+ // Push snapshotOffset and make sure the stack is aligned.
+ masm.subPtr(Imm32(2 * sizeof(void*)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()), Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void
+CodeGeneratorMIPS::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
+{
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel()->target());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.ma_li(ScratchRegister, cl.patchAt());
+ masm.branch(ScratchRegister);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void
+CodeGeneratorMIPS::emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register address)
+{
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+ // Subtract the low bound so the index is relative to the first case.
+ if (mir->low() != 0)
+ masm.subPtr(Imm32(mir->low()), index);
+
+ // Jump to default case if input is out of range
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+ // Compute the address of the jump-table entry for this case.
+ masm.ma_li(address, ool->jumpLabel()->patchAt());
+ masm.lshiftPtr(Imm32(4), index);
+ masm.addPtr(index, address);
+
+ masm.branch(address);
+}
+
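The lshiftPtr(Imm32(4), index) in emitTableSwitchDispatch scales the case index by 16 because each entry emitted by visitOutOfLineTableSwitch is four instructions: a patchable lui/ori pair from ma_li plus a jump and its delay slot (sizes assumed from those helpers). A small sketch of the entry-address computation:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t entryBytes = 4 * 4;   // four 4-byte instructions per entry
        uint32_t tableBase = 0x40002000;     // hypothetical jump-table address
        for (uint32_t index = 0; index < 3; index++) {
            // index * entryBytes is what lshiftPtr(Imm32(4), index) computes.
            std::printf("case %u -> 0x%x\n", index, tableBase + index * entryBytes);
        }
    }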
+static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };
+
+FrameSizeClass
+FrameSizeClass::FromDepth(uint32_t frameDepth)
+{
+ for (uint32_t i = 0; i < JS_ARRAY_LENGTH(FrameSizes); i++) {
+ if (frameDepth < FrameSizes[i])
+ return FrameSizeClass(i);
+ }
+
+ return FrameSizeClass::None();
+}
+
+FrameSizeClass
+FrameSizeClass::ClassLimit()
+{
+ return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
+}
+
+uint32_t
+FrameSizeClass::frameSize() const
+{
+ MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
+ MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));
+
+ return FrameSizes[class_];
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToOutValue(LInstruction* ins)
+{
+ Register typeReg = ToRegister(ins->getDef(TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getDef(PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand
+CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos)
+{
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void
+CodeGeneratorMIPS::visitBox(LBox* box)
+{
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // For NUNBOX32, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void
+CodeGeneratorMIPS::visitBoxFloatingPoint(LBoxFloatingPoint* box)
+{
+ const LDefinition* payload = box->getDef(PAYLOAD_INDEX);
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+ const LAllocation* in = box->getOperand(0);
+
+ FloatRegister reg = ToFloatRegister(in);
+ if (box->type() == MIRType::Float32) {
+ masm.convertFloat32ToDouble(reg, ScratchDoubleReg);
+ reg = ScratchDoubleReg;
+ }
+ masm.ma_mv(reg, ValueOperand(ToRegister(type), ToRegister(payload)));
+}
+
+void
+CodeGeneratorMIPS::visitUnbox(LUnbox* unbox)
+{
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+
+ if (mir->fallible()) {
+ bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
+ unbox->snapshot());
+ }
+}
+
+Register
+CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value)
+{
+ return value.typeReg();
+}
+
+void
+CodeGeneratorMIPS::visitCompareB(LCompareB* lir)
+{
+ MCompare* mir = lir->mir();
+
+ const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
+ const LAllocation* rhs = lir->rhs();
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+
+ Label notBoolean, done;
+ masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
+ {
+ if (rhs->isConstant())
+ masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output);
+ else
+ masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
+ masm.jump(&done);
+ }
+
+ masm.bind(&notBoolean);
+ {
+ masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
+ const LAllocation* rhs = lir->rhs();
+
+ MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
+ branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual);
+
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ if (rhs->isConstant())
+ emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(),
+ lir->ifFalse());
+ else
+ emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::visitCompareBitwise(LCompareBitwise* lir)
+{
+ MCompare* mir = lir->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput);
+ const Register output = ToRegister(lir->output());
+
+ MOZ_ASSERT(IsEqualityOp(mir->jsop()));
+
+ Label notEqual, done;
+ masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual, ShortJump);
+ {
+ masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
+ masm.ma_b(&done, ShortJump);
+ }
+ masm.bind(&notEqual);
+ {
+ masm.move32(Imm32(cond == Assembler::NotEqual), output);
+ }
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
+ const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput);
+ const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput);
+
+ MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
+ mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
+
+ MBasicBlock* notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
+
+ branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual);
+ emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::visitCompareI64(LCompareI64* lir)
+{
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.move32(Imm32(0), output);
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitCompareI64AndBranch(LCompareI64AndBranch* lir)
+{
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitDivOrModI64(LDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+    // All inputs are useAtStart for a call instruction. As a result we cannot
+    // ask for a non-aliasing temp, so we take one from the registers that the
+    // operands do not occupy.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
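+    // INT64_MIN / -1 would be 2^63, which is unrepresentable; wasm requires
+    // the division to trap and defines the corresponding modulo as 0.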
+ if (lir->canBeNegativeOverflow()) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (lir->mir()->isMod()) {
+ masm.xor64(output, output);
+ } else {
+ masm.jump(trap(lir, wasm::Trap::IntegerOverflow));
+ }
+ masm.jump(&done);
+ masm.bind(&notmin);
+ }
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::ModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::DivI64);
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+}
+
+void
+CodeGeneratorMIPS::visitUDivOrModI64(LUDivOrModI64* lir)
+{
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+    // All inputs are useAtStart for a call instruction. As a result we cannot
+    // ask for a non-aliasing temp, so we take one from the registers that the
+    // operands do not occupy.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(lhs.low);
+ regs.take(lhs.high);
+ if (lhs != rhs) {
+ regs.take(rhs.low);
+ regs.take(rhs.high);
+ }
+ Register temp = regs.takeAny();
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero())
+ masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod())
+ masm.callWithABI(wasm::SymbolicAddress::UModI64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::UDivI64);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
+{
+ const MWasmLoad* mir = lir->mir();
+ Register64 output = ToOutRegister64(lir);
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (byteSize <= 4) {
+ masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned)
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
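+    // A load narrower than 64 bits only fills the low word; the high word is
+    // completed below with zeroes or with a copy of the low word's sign bit.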
+ if (byteSize <= 4) {
+ masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned)
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS::visitWasmLoadI64(LWasmLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir)
+{
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void
+CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
+{
+ const MWasmStore* mir = lir->mir();
+ Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
+
+ uint32_t offset = mir->access().offset();
+ MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
+
+ Register ptr = ToRegister(lir->ptr());
+
+ if (offset) {
+ Register ptrPlusOffset = ToRegister(lir->ptrCopy());
+ masm.addPtr(Imm32(offset), ptrPlusOffset);
+ ptr = ptrPlusOffset;
+ } else {
+ MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
+ }
+
+ unsigned byteSize = mir->access().byteSize();
+ bool isSigned;
+ switch (mir->access().type()) {
+ case Scalar::Int8: isSigned = true; break;
+ case Scalar::Uint8: isSigned = false; break;
+ case Scalar::Int16: isSigned = true; break;
+ case Scalar::Uint16: isSigned = false; break;
+ case Scalar::Int32: isSigned = true; break;
+ case Scalar::Uint32: isSigned = false; break;
+ case Scalar::Int64: isSigned = true; break;
+ default: MOZ_CRASH("unexpected array type");
+ }
+
+ masm.memoryBarrier(mir->access().barrierBefore());
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ if (mir->access().isUnaligned()) {
+ Register temp = ToRegister(lir->getTemp(1));
+
+ if (byteSize <= 4) {
+ masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
+ temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ }
+ return;
+ }
+
+ if (byteSize <= 4) {
+ masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ static_cast<LoadStoreSize>(8 * byteSize));
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
+ masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+ }
+
+ masm.memoryBarrier(mir->access().barrierAfter());
+}
+
+void
+CodeGeneratorMIPS::visitWasmStoreI64(LWasmStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir)
+{
+ emitWasmStoreI64(lir);
+}
+
+void
+CodeGeneratorMIPS::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
+{
+ const MWasmLoadGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+ Register64 output = ToOutRegister64(ins);
+
+ masm.load32(Address(GlobalReg, addr + INT64LOW_OFFSET), output.low);
+ masm.load32(Address(GlobalReg, addr + INT64HIGH_OFFSET), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
+{
+ const MWasmStoreGlobalVar* mir = ins->mir();
+ unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias;
+    MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(ins->value());
+
+ masm.store32(input.low, Address(GlobalReg, addr + INT64LOW_OFFSET));
+ masm.store32(input.high, Address(GlobalReg, addr + INT64HIGH_OFFSET));
+}
+
+void
+CodeGeneratorMIPS::visitWasmSelectI64(LWasmSelectI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation trueExpr = lir->trueExpr();
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 output = ToOutRegister64(lir);
+
+ masm.move64(ToRegister64(trueExpr), output);
+
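+    // movz only writes its destination when the condition register is zero,
+    // so the true value moved above is kept whenever the condition holds.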
+ if (falseExpr.low().isRegister()) {
+ masm.as_movz(output.low, ToRegister(falseExpr.low()), cond);
+ masm.as_movz(output.high, ToRegister(falseExpr.high()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.low()), output.low);
+ masm.loadPtr(ToAddress(falseExpr.high()), output.high);
+ masm.bind(&done);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.moveToDoubleLo(input.low, output);
+ masm.moveToDoubleHi(input.high, output);
+}
+
+void
+CodeGeneratorMIPS::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir)
+{
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.moveFromDoubleLo(input, output.low);
+ masm.moveFromDoubleHi(input, output.high);
+}
+
+void
+CodeGeneratorMIPS::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir)
+{
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ if (input != output.low)
+ masm.move32(input, output.low);
+ if (lir->mir()->isUnsigned())
+ masm.move32(Imm32(0), output.high);
+ else
+ masm.ma_sra(output.high, output.low, Imm32(31));
+}
+
+void
+CodeGeneratorMIPS::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir)
+{
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf())
+ masm.move32(ToRegister(input.low()), output);
+ else
+ masm.move32(ToRegister(input.high()), output);
+}
+
+void
+CodeGeneratorMIPS::visitClzI64(LClzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitCtzI64(LCtzI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void
+CodeGeneratorMIPS::visitNotI64(LNotI64* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.as_or(output, input.low, input.high);
+ masm.cmp32Set(Assembler::Equal, output, Imm32(0), output);
+}
+
+void
+CodeGeneratorMIPS::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
+{
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister scratch = input;
+ Register64 output = ToOutRegister64(lir);
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new(alloc()) OutOfLineWasmTruncateCheck(mir, input);
+ addOutOfLineCode(ool, mir);
+
+ if (fromType == MIRType::Double) {
+ masm.branchDouble(Assembler::DoubleUnordered, input, input, ool->entry());
+ } else if (fromType == MIRType::Float32) {
+ masm.branchFloat(Assembler::DoubleUnordered, input, input, ool->entry());
+ scratch = ScratchDoubleReg;
+ masm.convertFloat32ToDouble(input, scratch);
+ } else {
+ MOZ_CRASH("unexpected type in visitOutOfLineWasmTruncateCheck");
+ }
+
+ masm.setupUnalignedABICall(output.high);
+ masm.passABIArg(scratch, MoveOp::DOUBLE);
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToUint64);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::TruncateDoubleToInt64);
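+    // The callouts signal failure by returning 0x8000000000000000. Only that
+    // exact value falls through to the out-of-line check, which decides
+    // between a trap and a legitimate INT64_MIN result.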
+ masm.ma_b(output.high, Imm32(0x80000000), ool->rejoin(), Assembler::NotEqual);
+ masm.ma_b(output.low, Imm32(0x00000000), ool->rejoin(), Assembler::NotEqual);
+ masm.ma_b(ool->entry());
+
+ masm.bind(ool->rejoin());
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void
+CodeGeneratorMIPS::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(input.low);
+ regs.take(input.high);
+ Register temp = regs.takeAny();
+
+ masm.setupUnalignedABICall(temp);
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+
+ if (lir->mir()->isUnsigned())
+ masm.callWithABI(wasm::SymbolicAddress::Uint64ToFloatingPoint, MoveOp::DOUBLE);
+ else
+ masm.callWithABI(wasm::SymbolicAddress::Int64ToFloatingPoint, MoveOp::DOUBLE);
+
+ MOZ_ASSERT_IF(toType == MIRType::Double, output == ReturnDoubleReg);
+ if (toType == MIRType::Float32) {
+ MOZ_ASSERT(output == ReturnFloat32Reg);
+ masm.convertDoubleToFloat32(ReturnDoubleReg, output);
+ }
+}
+
+void
+CodeGeneratorMIPS::visitTestI64AndBranch(LTestI64AndBranch* lir)
+{
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ branchToBlock(input.high, Imm32(0), lir->ifTrue(), Assembler::NonZero);
+ emitBranch(input.low, Imm32(0), Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
+
+void
+CodeGeneratorMIPS::setReturnDoubleRegs(LiveRegisterSet* regs)
+{
+ MOZ_ASSERT(ReturnFloat32Reg.code_ == ReturnDoubleReg.code_);
+ regs->add(ReturnFloat32Reg);
+ regs->add(ReturnDoubleReg.singleOverlay(1));
+ regs->add(ReturnDoubleReg);
+}
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.h b/js/src/jit/mips32/CodeGenerator-mips32.h
new file mode 100644
index 000000000..fc4394b65
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_CodeGenerator_mips32_h
+#define jit_mips32_CodeGenerator_mips32_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS : public CodeGeneratorMIPSShared
+{
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_UNDEFINED), cond, ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_OBJECT), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ public:
+ void visitCompareB(LCompareB* lir);
+ void visitCompareBAndBranch(LCompareBAndBranch* lir);
+ void visitCompareBitwise(LCompareBitwise* lir);
+ void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir);
+ void visitCompareI64(LCompareI64* lir);
+ void visitCompareI64AndBranch(LCompareI64AndBranch* lir);
+ void visitDivOrModI64(LDivOrModI64* lir);
+ void visitUDivOrModI64(LUDivOrModI64* lir);
+ void visitWasmLoadI64(LWasmLoadI64* ins);
+ void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir);
+ void visitWasmStoreI64(LWasmStoreI64* ins);
+ void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins);
+ void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
+ void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);
+ void visitWasmSelectI64(LWasmSelectI64* lir);
+ void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir);
+ void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir);
+ void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir);
+ void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir);
+ void visitClzI64(LClzI64* ins);
+ void visitCtzI64(LCtzI64* ins);
+ void visitNotI64(LNotI64* ins);
+ void visitWasmTruncateToInt64(LWasmTruncateToInt64* ins);
+ void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir);
+ void visitTestI64AndBranch(LTestI64AndBranch* lir);
+
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ protected:
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToOutValue(LInstruction* ins);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ Register splitTagForTest(const ValueOperand& value);
+
+ public:
+ CodeGeneratorMIPS(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm)
+ { }
+
+ public:
+ void visitBox(LBox* box);
+ void visitBoxFloatingPoint(LBoxFloatingPoint* box);
+ void visitUnbox(LUnbox* unbox);
+ void setReturnDoubleRegs(LiveRegisterSet* regs);
+};
+
+typedef CodeGeneratorMIPS CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_CodeGenerator_mips32_h */
diff --git a/js/src/jit/mips32/LIR-mips32.h b/js/src/jit/mips32/LIR-mips32.h
new file mode 100644
index 000000000..8c0fa9a95
--- /dev/null
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -0,0 +1,169 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LIR_mips32_h
+#define jit_mips32_LIR_mips32_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp, MIRType type)
+ : type_(type)
+ {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0>
+{
+ public:
+ LIR_HEADER(Unbox);
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+ const LAllocation* payload() {
+ return getOperand(0);
+ }
+ const LAllocation* type() {
+ return getOperand(1);
+ }
+ const char* extraName() const {
+ return StringFromMIRType(mir()->type());
+ }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0>
+{
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : type_(type)
+ {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const {
+ return mir_->toUnbox();
+ }
+
+ MIRType type() const {
+ return type_;
+ }
+ const char* extraName() const {
+ return StringFromMIRType(type_);
+ }
+};
+
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+{
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeDivideByZero();
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod())
+ return mir_->toMod()->canBeNegativeDividend();
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::TrapOffset trapOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod())
+ return mir_->toMod()->trapOffset();
+ return mir_->toDiv()->trapOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0>
+{
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const {
+ return mir_->toWasmTruncateToInt64();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_LIR_mips32_h */
diff --git a/js/src/jit/mips32/LOpcodes-mips32.h b/js/src/jit/mips32/LOpcodes-mips32.h
new file mode 100644
index 000000000..8e39737c7
--- /dev/null
+++ b/js/src/jit/mips32/LOpcodes-mips32.h
@@ -0,0 +1,25 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LOpcodes_mips32_h__
+#define jit_mips32_LOpcodes_mips32_h__
+
+#include "jit/shared/LOpcodes-shared.h"
+
+#define LIR_CPU_OPCODE_LIST(_) \
+ _(BoxFloatingPoint) \
+ _(ModMaskI) \
+ _(UDivOrMod) \
+ _(DivOrModI64) \
+ _(UDivOrModI64) \
+ _(WasmUnalignedLoad) \
+ _(WasmUnalignedStore) \
+ _(WasmUnalignedLoadI64) \
+ _(WasmUnalignedStoreI64) \
+ _(WasmTruncateToInt64) \
+ _(Int64ToFloatingPoint)
+
+#endif // jit_mips32_LOpcodes_mips32_h__
diff --git a/js/src/jit/mips32/Lowering-mips32.cpp b/js/src/jit/mips32/Lowering-mips32.cpp
new file mode 100644
index 000000000..650694823
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.cpp
@@ -0,0 +1,258 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Lowering-mips32.h"
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation
+LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart)
+{
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+void
+LIRGeneratorMIPS::visitBox(MBox* box)
+{
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+ if (IsFloatingPointType(inner->type())) {
+ defineBox(new(alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
+ tempCopy(inner, 0), inner->type()), box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new(alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new(alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void
+LIRGeneratorMIPS::visitUnbox(MUnbox* unbox)
+{
+ MDefinition* inner = unbox->getOperand(0);
+
+ if (inner->type() == MIRType::ObjectOrNull) {
+ LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(inner));
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ defineReuseInput(lir, unbox, 0);
+ return;
+ }
+
+    // An unbox on MIPS reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir = new(alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload
+ // register.
+ LUnbox* lir = new(alloc()) LUnbox;
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+ if (unbox->fallible())
+ assignSnapshot(lir, unbox->bailoutKind());
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ defineReuseInput(lir, unbox, 0);
+}
+
+void
+LIRGeneratorMIPS::visitReturn(MReturn* ret)
+{
+ MDefinition* opd = ret->getOperand(0);
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new(alloc()) LReturn;
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void
+LIRGeneratorMIPS::defineUntypedPhi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+
+void
+LIRGeneratorMIPS::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(inputPosition, LUse(operand->virtualRegister() + VREG_TYPE_OFFSET,
+ LUse::ANY));
+ payload->setOperand(inputPosition, LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void
+LIRGeneratorMIPS::defineInt64Phi(MPhi* phi, size_t lirIndex)
+{
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void
+LIRGeneratorMIPS::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex)
+{
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(inputPosition, LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void
+LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new(alloc()) LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32* ins)
+{
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new(alloc()) LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()), ins);
+}
+
+void
+LIRGeneratorMIPS::lowerDivI64(MDiv* div)
+{
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorMIPS::lowerModI64(MMod* mod)
+{
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorMIPS::lowerUDivI64(MDiv* div)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
+ useInt64RegisterAtStart(div->rhs()));
+ defineReturn(lir, div);
+}
+
+void
+LIRGeneratorMIPS::lowerUModI64(MMod* mod)
+{
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
+ useInt64RegisterAtStart(mod->rhs()));
+ defineReturn(lir, mod);
+}
+
+void
+LIRGeneratorMIPS::visitRandom(MRandom* ins)
+{
+    LRandom* lir = new(alloc()) LRandom(temp(),
+                                        temp(),
+                                        temp(),
+                                        temp(),
+                                        temp());
+ defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
+}
diff --git a/js/src/jit/mips32/Lowering-mips32.h b/js/src/jit/mips32/Lowering-mips32.h
new file mode 100644
index 000000000..2deb268a8
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Lowering_mips32_h
+#define jit_mips32_Lowering_mips32_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS : public LIRGeneratorMIPSShared
+{
+ protected:
+ LIRGeneratorMIPS(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph)
+ { }
+
+ protected:
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() {
+ return LDefinition::BogusTemp();
+ }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerModI64(MMod* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ public:
+ void visitBox(MBox* box);
+ void visitUnbox(MUnbox* unbox);
+ void visitReturn(MReturn* ret);
+ void visitRandom(MRandom* ins);
+};
+
+typedef LIRGeneratorMIPS LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Lowering_mips32_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32-inl.h b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
new file mode 100644
index 000000000..2dae8fb87
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -0,0 +1,1077 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_inl_h
+#define jit_mips32_MacroAssembler_mips32_inl_h
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void
+MacroAssembler::move64(Register64 src, Register64 dest)
+{
+ move32(src.low, dest.low);
+ move32(src.high, dest.high);
+}
+
+void
+MacroAssembler::move64(Imm64 imm, Register64 dest)
+{
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+// ===============================================================
+// Logical instructions
+
+void
+MacroAssembler::andPtr(Register src, Register dest)
+{
+ ma_and(dest, src);
+}
+
+void
+MacroAssembler::andPtr(Imm32 imm, Register dest)
+{
+ ma_and(dest, imm);
+}
+
+void
+MacroAssembler::and64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value != int32_t(0xFFFFFFFF))
+ and32(imm.low(), dest.low);
+ if (imm.hi().value != int32_t(0xFFFFFFFF))
+ and32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::and64(Register64 src, Register64 dest)
+{
+ and32(src.low, dest.low);
+ and32(src.high, dest.high);
+}
+
+void
+MacroAssembler::or64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ or32(imm.low(), dest.low);
+ if (imm.hi().value)
+ or32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::xor64(Imm64 imm, Register64 dest)
+{
+ if (imm.low().value)
+ xor32(imm.low(), dest.low);
+ if (imm.hi().value)
+ xor32(imm.hi(), dest.high);
+}
+
+void
+MacroAssembler::orPtr(Register src, Register dest)
+{
+ ma_or(dest, src);
+}
+
+void
+MacroAssembler::orPtr(Imm32 imm, Register dest)
+{
+ ma_or(dest, imm);
+}
+
+void
+MacroAssembler::or64(Register64 src, Register64 dest)
+{
+ or32(src.low, dest.low);
+ or32(src.high, dest.high);
+}
+
+void
+MacroAssembler::xor64(Register64 src, Register64 dest)
+{
+ ma_xor(dest.low, src.low);
+ ma_xor(dest.high, src.high);
+}
+
+void
+MacroAssembler::xorPtr(Register src, Register dest)
+{
+ ma_xor(dest, src);
+}
+
+void
+MacroAssembler::xorPtr(Imm32 imm, Register dest)
+{
+ ma_xor(dest, imm);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void
+MacroAssembler::addPtr(Register src, Register dest)
+{
+ ma_addu(dest, src);
+}
+
+void
+MacroAssembler::addPtr(Imm32 imm, Register dest)
+{
+ ma_addu(dest, imm);
+}
+
+void
+MacroAssembler::addPtr(ImmWord imm, Register dest)
+{
+ addPtr(Imm32(imm.value), dest);
+}
+
+void
+MacroAssembler::add64(Register64 src, Register64 dest)
+{
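+    // Add in two 32-bit halves: sltu computes the carry out of the low-word
+    // addition (it is 1 exactly when the sum wrapped below an operand).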
+ as_addu(dest.low, dest.low, src.low);
+ as_sltu(ScratchRegister, dest.low, src.low);
+ as_addu(dest.high, dest.high, src.high);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void
+MacroAssembler::add64(Imm32 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, imm);
+ as_addu(dest.low, dest.low, ScratchRegister);
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+void
+MacroAssembler::add64(Imm64 imm, Register64 dest)
+{
+ add64(imm.low(), dest);
+ ma_addu(dest.high, dest.high, imm.hi());
+}
+
+void
+MacroAssembler::subPtr(Register src, Register dest)
+{
+ as_subu(dest, dest, src);
+}
+
+void
+MacroAssembler::subPtr(Imm32 imm, Register dest)
+{
+ ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssembler::sub64(Register64 src, Register64 dest)
+{
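+    // Compute the borrow out of the low word first (dest.low < src.low),
+    // then subtract it from the high word along with src.high.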
+ as_sltu(ScratchRegister, dest.low, src.low);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ as_subu(dest.low, dest.low, src.low);
+ as_subu(dest.high, dest.high, src.high);
+}
+
+void
+MacroAssembler::sub64(Imm64 imm, Register64 dest)
+{
+ ma_li(ScratchRegister, imm.low());
+ as_sltu(ScratchRegister, dest.low, ScratchRegister);
+ as_subu(dest.high, dest.high, ScratchRegister);
+ ma_subu(dest.low, dest.low, imm.low());
+ ma_subu(dest.high, dest.high, imm.hi());
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
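+    //
+    // That is, for a = 2^32*aH + aL and b = 2^32*bH + bL:
+    //   a*b mod 2^64 = 2^32*LOW(aH*bL + aL*bH + HIGH(aL*bL)) + LOW(aL*bL)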
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ ma_li(ScratchRegister, Imm32(imm.value & LOW_32_MASK));
+ as_multu(dest.high, ScratchRegister);
+ as_mflo(dest.high);
+
+ // mfhi:mflo = LOW(dest) * LOW(imm);
+ as_multu(dest.low, ScratchRegister);
+
+ // HIGH(dest) += mfhi;
+ as_mfhi(ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ if (((imm.value >> 32) & LOW_32_MASK) == 5) {
+ // Optimized case for Math.random().
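+        // The high word of imm is 5 here, so x * 5 can be formed with a
+        // shift-add, (x << 2) + x, saving a second multu.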
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ as_sll(ScratchRegister, dest.low, 2);
+ as_addu(ScratchRegister, ScratchRegister, dest.low);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ // LOW(dest) = mflo;
+ as_mflo(dest.low);
+ } else {
+ // tmp = mflo
+ as_mflo(SecondScratchReg);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ ma_li(ScratchRegister, Imm32((imm.value >> 32) & LOW_32_MASK));
+ as_multu(dest.low, ScratchRegister);
+ as_mflo(ScratchRegister);
+ as_addu(dest.high, dest.high, ScratchRegister);
+
+ // LOW(dest) = tmp;
+ ma_move(dest.low, SecondScratchReg);
+ }
+}
+
+void
+MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
+{
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+    MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+    // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+    ma_li(ScratchRegister, imm.firstHalf());
+ as_multu(dest.high, ScratchRegister);
+ as_mflo(dest.high);
+
+ ma_li(ScratchRegister, imm.secondHalf());
+ as_multu(dest.low, ScratchRegister);
+ as_mflo(temp);
+ as_addu(temp, dest.high, temp);
+
+ ma_li(ScratchRegister, imm.firstHalf());
+ as_multu(dest.low, ScratchRegister);
+ as_mfhi(dest.high);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, temp);
+}
+
+void
+MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp)
+{
+    // LOW32 = LOW(LOW(dest) * LOW(src));
+    // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]
+    // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]
+    // + HIGH(LOW(dest) * LOW(src)) [carry]
+
+    MOZ_ASSERT(dest != src);
+    MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+    // HIGH(dest) = LOW(HIGH(dest) * LOW(src));
+    as_multu(dest.high, src.low); // (2)
+ as_mflo(dest.high);
+ as_multu(dest.low, src.high); // (3)
+ as_mflo(temp);
+ as_addu(temp, dest.high, temp);
+ as_multu(dest.low, src.low); // (4) + (1)
+ as_mfhi(dest.high);
+ as_mflo(dest.low);
+ as_addu(dest.high, dest.high, temp);
+}
+
+void
+MacroAssembler::neg64(Register64 reg)
+{
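+    // Negate in two halves: the low word is negated directly, while the high
+    // word is negated with a borrow of 1 unless the low word is zero (the
+    // movz below clears the borrow in that case).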
+ ma_li(ScratchRegister, Imm32(1));
+ as_movz(ScratchRegister, zero, reg.low);
+ ma_negu(reg.low, reg.low);
+ as_addu(reg.high, reg.high, ScratchRegister);
+ ma_negu(reg.high, reg.high);
+}
+
+void
+MacroAssembler::mulBy3(Register src, Register dest)
+{
+ as_addu(dest, src, src);
+ as_addu(dest, dest, src);
+}
+
+void
+MacroAssembler::inc64(AbsoluteAddress dest)
+{
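+    // Increment a 64-bit counter in memory: bump the low word, then use
+    // sltiu (new value unsigned-less-than 1, i.e. wrapped to zero) as the
+    // carry into the high word.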
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_lw(SecondScratchReg, ScratchRegister, 0);
+
+ as_addiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sw(SecondScratchReg, ScratchRegister, 0);
+
+ as_sltiu(SecondScratchReg, SecondScratchReg, 1);
+ as_lw(ScratchRegister, ScratchRegister, 4);
+
+ as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
+
+ ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+ as_sw(SecondScratchReg, ScratchRegister, 4);
+}
+
+// ===============================================================
+// Shift functions
+
+void
+MacroAssembler::lshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sll(dest, dest, imm);
+}
+
+void
+MacroAssembler::lshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value == 0) {
+ return;
+ } else if (imm.value < 32) {
+ as_sll(dest.high, dest.high, imm.value);
+ as_srl(scratch, dest.low, 32 - imm.value);
+ as_or(dest.high, dest.high, scratch);
+ as_sll(dest.low, dest.low, imm.value);
+ } else {
+ as_sll(dest.high, dest.low, imm.value - 32);
+ move32(Imm32(0), dest.low);
+ }
+}
+
+void
+MacroAssembler::lshift64(Register unmaskedShift, Register64 dest)
+{
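+    // The shift amount is masked to 0..63. A shift of 32 or more moves only
+    // low-word bits into the high word; a smaller shift also carries the low
+    // word's top bits into the high word.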
+ Label done, less;
+ ScratchRegisterScope shift(*this);
+
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_b(shift, Imm32(0), &done, Equal);
+
+ ma_sll(dest.high, dest.high, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_sll(dest.high, dest.low, shift);
+ move32(Imm32(0), dest.low);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_srl(SecondScratchReg, dest.low, shift);
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_sll(dest.low, dest.low, shift);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshiftPtr(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_srl(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ ma_sra(dest, dest, imm);
+}
+
+void
+MacroAssembler::rshift64(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, 32 - imm.value);
+ as_or(dest.low, dest.low, scratch);
+ as_srl(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ move32(Imm32(0), dest.high);
+ } else {
+ ma_srl(dest.low, dest.high, Imm32(imm.value - 32));
+ move32(Imm32(0), dest.high);
+ }
+}
+
+void
+MacroAssembler::rshift64(Register unmaskedShift, Register64 dest)
+{
+ Label done, less;
+ ScratchRegisterScope shift(*this);
+
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.low, dest.low, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_srl(dest.low, dest.high, shift);
+ move32(Imm32(0), dest.high);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_sll(SecondScratchReg, dest.high, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_srl(dest.high, dest.high, shift);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest)
+{
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ScratchRegisterScope scratch(*this);
+
+ if (imm.value < 32) {
+ as_srl(dest.low, dest.low, imm.value);
+ as_sll(scratch, dest.high, 32 - imm.value);
+ as_or(dest.low, dest.low, scratch);
+ as_sra(dest.high, dest.high, imm.value);
+ } else if (imm.value == 32) {
+ ma_move(dest.low, dest.high);
+ as_sra(dest.high, dest.high, 31);
+ } else {
+ as_sra(dest.low, dest.high, imm.value - 32);
+ as_sra(dest.high, dest.high, 31);
+ }
+}
+
+void
+MacroAssembler::rshift64Arithmetic(Register unmaskedShift, Register64 dest)
+{
+ Label done, less;
+
+ ScratchRegisterScope shift(*this);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+
+ ma_srl(dest.low, dest.low, shift);
+ ma_subu(shift, shift, Imm32(32));
+ ma_b(shift, Imm32(0), &less, LessThan);
+
+ ma_sra(dest.low, dest.high, shift);
+ as_sra(dest.high, dest.high, 31);
+ ma_b(&done);
+
+ bind(&less);
+ ma_li(SecondScratchReg, Imm32(0));
+ as_subu(shift, SecondScratchReg, shift);
+ ma_sll(SecondScratchReg, dest.high, shift);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_and(shift, unmaskedShift, Imm32(0x3f));
+ ma_sra(dest.high, dest.high, shift);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void
+MacroAssembler::rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateRight64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_sll(dest.high, input.high, Imm32(amount));
+ ma_srl(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_sll(dest.low, input.low, Imm32(amount));
+ ma_srl(SecondScratchReg, scratch, Imm32(32 - amount));
+            as_or(dest.low, dest.low, SecondScratchReg);
+ }
+ }
+}
+
+void
+MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done, zero;
+
+ ma_and(temp, shift, Imm32(0x3f));
+ ma_b(temp, Imm32(32), &high, GreaterThanOrEqual);
+
+ // high = high << shift | low >> 32 - shift
+ // low = low << shift | high >> 32 - shift
+ ma_sll(dest.high, src.high, temp);
+ ma_b(temp, Imm32(0), &zero, Equal);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_srl(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_sll(dest.low, src.low, temp);
+ ma_srl(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+ ma_b(&done);
+
+ bind(&zero);
+ ma_move(dest.low, src.low);
+ ma_move(dest.high, src.high);
+ ma_b(&done);
+
+    // A rotate by 32..63 is a rotate by (64 - amount), i.e. 1..32, in the other direction.
+ bind(&high);
+ ma_and(shift, shift, Imm32(0x3f));
+ ma_li(SecondScratchReg, Imm32(64));
+ as_subu(temp, SecondScratchReg, shift);
+
+ ma_srl(dest.high, src.high, temp);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+ ma_sll(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_srl(dest.low, src.low, temp);
+ ma_sll(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp == InvalidReg);
+ MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+ int32_t amount = count.value & 0x3f;
+ if (amount > 32) {
+ rotateLeft64(Imm32(64 - amount), input, dest, temp);
+ } else {
+ ScratchRegisterScope scratch(*this);
+ if (amount == 0) {
+ ma_move(dest.low, input.low);
+ ma_move(dest.high, input.high);
+ } else if (amount == 32) {
+ ma_move(scratch, input.low);
+ ma_move(dest.low, input.high);
+ ma_move(dest.high, scratch);
+ } else {
+ MOZ_ASSERT(0 < amount && amount < 32);
+ ma_move(scratch, input.high);
+ ma_srl(dest.high, input.high, Imm32(amount));
+ ma_sll(SecondScratchReg, input.low, Imm32(32 - amount));
+ as_or(dest.high, dest.high, SecondScratchReg);
+ ma_srl(dest.low, input.low, Imm32(amount));
+ ma_sll(SecondScratchReg, scratch, Imm32(32 - amount));
+ as_or(dest.low, dest.low, SecondScratchReg);
+ }
+ }
+}
+
+void
+MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, Register temp)
+{
+ MOZ_ASSERT(temp != src.low && temp != src.high);
+ MOZ_ASSERT(shift != src.low && shift != src.high);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ScratchRegisterScope shift_value(*this);
+ Label high, done, zero;
+
+ ma_and(temp, shift, Imm32(0x3f));
+ ma_b(temp, Imm32(32), &high, GreaterThanOrEqual);
+
+ // high = high >> shift | low << 32 - shift
+ // low = low >> shift | high << 32 - shift
+ ma_srl(dest.high, src.high, temp);
+ ma_b(temp, Imm32(0), &zero, Equal);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_sll(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+    ma_srl(dest.low, src.low, temp);
+    ma_sll(SecondScratchReg, src.high, shift_value);
+    as_or(dest.low, dest.low, SecondScratchReg);
+
+ ma_b(&done);
+
+ bind(&zero);
+ ma_move(dest.low, src.low);
+ ma_move(dest.high, src.high);
+ ma_b(&done);
+
+    // A rotate by 32..63 is a rotate by (64 - amount), i.e. 1..32, in the other direction.
+ bind(&high);
+ ma_and(shift, shift, Imm32(0x3f));
+ ma_li(SecondScratchReg, Imm32(64));
+ as_subu(temp, SecondScratchReg, shift);
+
+ ma_sll(dest.high, src.high, temp);
+ ma_li(SecondScratchReg, Imm32(32));
+ as_subu(shift_value, SecondScratchReg, temp);
+
+ ma_srl(SecondScratchReg, src.low, shift_value);
+ as_or(dest.high, dest.high, SecondScratchReg);
+
+ ma_sll(dest.low, src.low, temp);
+ ma_srl(SecondScratchReg, src.high, shift_value);
+ as_or(dest.low, dest.low, SecondScratchReg);
+
+ bind(&done);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+template <typename T1, typename T2>
+void
+MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
+{
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void
+MacroAssembler::clz64(Register64 src, Register dest)
+{
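+    // The high word decides unless it is zero, in which case the answer is
+    // 32 plus the leading-zero count of the low word.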
+ Label done, low;
+
+ ma_b(src.high, Imm32(0), &low, Equal);
+ as_clz(dest, src.high);
+ ma_b(&done);
+
+ bind(&low);
+ as_clz(dest, src.low);
+ ma_addu(dest, Imm32(32));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::ctz64(Register64 src, Register dest)
+{
+ Label done, high;
+
+ ma_b(src.low, Imm32(0), &high, Equal);
+
+ ma_ctz(dest, src.low);
+ ma_b(&done);
+
+ bind(&high);
+ ma_ctz(dest, src.high);
+ ma_addu(dest, Imm32(32));
+
+ bind(&done);
+}
+
+void
+MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp)
+{
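+    // popcnt64 is the sum of the two 32-bit popcounts; the sum always fits
+    // in the low word, so the high word is simply cleared.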
+ MOZ_ASSERT(dest.low != tmp);
+ MOZ_ASSERT(dest.high != tmp);
+ MOZ_ASSERT(dest.low != dest.high);
+
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
+
+ ma_addu(dest.low, dest.high);
+ move32(Imm32(0), dest.high);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+
+ branch32(cond, lhs, val.firstHalf(), label);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), val.secondHalf(), label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
+ Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::NotEqual,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ load32(rhs, scratch);
+ branch32(cond, lhs, scratch, label);
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch, label);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition invert_cond = Assembler::InvertCondition(cond);
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(invert_cond);
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
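+ // Decompose the 64-bit relation: take the branch if the high words
+ // already decide it (cond1), fail if they decide it the other way
+ // (cond2), and otherwise let the low words decide with an unsigned
+ // compare (cond3), since the low words carry no sign bit. E.g. for
+ // LessThan: branch if lhs.high < val.hi(), fail if lhs.high > val.hi(),
+ // else branch if lhs.low is below val.low() unsigned.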
+ ma_b(lhs.high, val.hi(), success, cond1);
+ ma_b(lhs.high, val.hi(), fail, cond2);
+ ma_b(lhs.low, val.low(), success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail)
+{
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch(cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition invert_cond = Assembler::InvertCondition(cond);
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 = Assembler::ConditionWithoutEqual(invert_cond);
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
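+ // Same high/low decomposition as the Imm64 variant above: a strict
+ // compare on the high words, then an unsigned compare on the low words.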
+ ma_b(lhs.high, rhs.high, success, cond1);
+ ma_b(lhs.high, rhs.high, fail, cond2);
+ ma_b(lhs.low, rhs.low, success, cond3);
+ if (!fallthrough)
+ jump(fail);
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough)
+ bind(fail);
+}
+
+void
+MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label)
+{
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
+ L label)
+{
+ if (cond == Assembler::Zero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
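+ // A 64-bit value is zero exactly when (low | high) == 0, so one OR plus
+ // a 32-bit test covers the Zero condition.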
+ as_or(ScratchRegister, lhs.low, lhs.high);
+ branchTestPtr(cond, ScratchRegister, ScratchRegister, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void
+MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestUndefined(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestInt32(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label)
+{
+ ScratchRegisterScope scratch(*this);
+ as_and(scratch, value.payloadReg(), value.payloadReg());
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
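+ // On nunbox32, every non-double tag value lies above JSVAL_TAG_CLEAR, so
+ // "is a double" reduces to an unsigned Below test on the tag word.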
+ ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestDouble(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNumber(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ ma_b(value.payloadReg(), value.payloadReg(), label, b ? NonZero : Zero);
+}
+
+void
+MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestString(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label)
+{
+ Register string = value.payloadReg();
+ SecondScratchRegisterScope scratch2(*this);
+ ma_lw(scratch2, Address(string, JSString::offsetOfLength()));
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void
+MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestSymbol(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestNull(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestObject(cond, value.typeReg(), label);
+}
+
+void
+MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
+{
+ branchTestPrimitive(cond, value.typeReg(), label);
+}
+
+template <class L>
+void
+MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label)
+{
+ ma_b(value.typeReg(), ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label)
+{
+ branchTestMagic(cond, valaddr, label);
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+}
+
+// ========================================================================
+// Memory access primitives.
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
+{
+ ma_sd(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_sd(src, addr);
+}
+
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
+{
+ ma_ss(src, addr);
+}
+void
+MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
+{
+ MOZ_ASSERT(addr.offset == 0);
+ ma_ss(src, addr);
+}
+
+// ========================================================================
+// wasm support
+
+template <class L>
+void
+MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
+{
+ BufferOffset bo = ma_BoundsCheck(ScratchRegister);
+ append(wasm::BoundsCheck(bo.getOffset()));
+
+ ma_b(index, ScratchRegister, label, cond);
+}
+
+void
+MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
+{
+ Instruction* inst = (Instruction*) patchAt;
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+
+ // Patch the lui/ori pair with the new bounds-check limit.
+ Assembler::UpdateLuiOriValue(i0, i1, limit);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+void
+MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr)
+{
+ asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+void
+MacroAssemblerMIPSCompat::computeEffectiveAddress(const BaseIndex& address, Register dest)
+{
+ computeScaledAddress(address, dest);
+ if (address.offset)
+ asMasm().addPtr(Imm32(address.offset), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_inl_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
new file mode 100644
index 000000000..0d3e55e21
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -0,0 +1,2365 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
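+// On the little-endian nunbox32 layout targeted here, these work out to the
+// payload word at offset 0 and the tag word at offset 4 of the 8-byte Value.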
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+void
+MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest)
+{
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src, FloatRegister dest)
+{
+ // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray
+ // calls this function with ScratchDoubleReg as dest.
+ MOZ_ASSERT(dest != SecondScratchDoubleReg);
+
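+ // cvt.d.w treats its input as signed: bias the value into signed range,
+ // convert, then add 2^31 back as a double. E.g. src = 0xffffffff biases
+ // to 0x7fffffff (2147483647), converts to 2147483647.0, and adding
+ // 2147483648.0 recovers 4294967295.0.
+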
+ // Bias the value into signed range by subtracting INT32_MIN
+ ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+ // Convert value
+ as_mtc1(ScratchRegister, dest);
+ as_cvtdw(dest, dest);
+
+ // Add 2^31 (the unsigned magnitude of INT32_MIN) back as a double
+ ma_lid(SecondScratchDoubleReg, 2147483648.0);
+ as_addd(dest, dest, SecondScratchDoubleReg);
+}
+
+static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;
+
+bool
+MacroAssemblerMIPSCompat::convertUInt64ToDoubleNeedsTemp()
+{
+ return false;
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp)
+{
+ MOZ_ASSERT(temp == Register::Invalid());
+ convertUInt32ToDouble(src.high, dest);
+ loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
+ asMasm().mulDouble(ScratchDoubleReg, dest);
+ convertUInt32ToDouble(src.low, ScratchDoubleReg);
+ asMasm().addDouble(ScratchDoubleReg, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src, FloatRegister dest)
+{
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ // We cannot do the same as convertUInt32ToDouble because float32 doesn't
+ // have enough precision.
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ convertInt32ToFloat32(src, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest)
+{
+ as_cvtsd(dest, src);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
+ moveFromDoubleHi(src, dest);
+ moveFromDoubleLo(src, ScratchRegister);
+ as_movn(dest, zero, ScratchRegister);
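+ // movn zeroes dest whenever the low word is nonzero, so dest equals
+ // 0x80000000 only for the exact bit pattern of -0.0 (hi 0x80000000,
+ // lo 0), which the compare below sends to the failure path.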
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ // Convert double to int, then convert back and check if we have the
+ // same number.
+ as_cvtwd(ScratchDoubleReg, src);
+ as_mfc1(dest, ScratchDoubleReg);
+ as_cvtdw(ScratchDoubleReg, ScratchDoubleReg);
+ ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail, bool negativeZeroCheck)
+{
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ // Converting the floating point value to an integer and then converting it
+ // back to a float32 would not work, as float to int32 conversions are
+ // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX
+ // and then back to float(INT32_MAX + 1)). If this ever happens, we just
+ // bail out.
+ as_cvtws(ScratchFloat32Reg, src);
+ as_mfc1(dest, ScratchFloat32Reg);
+ as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg);
+ ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+ // Bail out in the clamped cases.
+ ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+void
+MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest)
+{
+ as_cvtds(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src, FloatRegister dest)
+{
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src, FloatRegister dest)
+{
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, CodeOffset* label)
+{
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->bind(bo.getOffset());
+}
+
+void
+MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm)
+{
+ ma_li(dest, Imm32(uint32_t(imm.value)));
+}
+
+// This method generates a lui/ori instruction pair that can be modified by
+// UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
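+// For example, materializing 0x12345678 emits "lui dest, 0x1234" followed by
+// "ori dest, dest, 0x5678"; both 16-bit halves remain patchable in place.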
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
+{
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+}
+
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
+{
+ ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm)
+{
+ ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Arithmetic-based ops.
+
+// Add.
+template <typename L>
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow)
+{
+ Label goodAddition;
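+ // Signed addition overflows only when both operands share a sign and the
+ // result's sign differs; the two XORs below test exactly those sign
+ // bits. E.g. 0x7fffffff + 1 = 0x80000000: rs ^ rt is positive but
+ // rs ^ rd is negative, so the overflow branch is taken.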
+ as_addu(rd, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+}
+
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Register rt, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt,
+ wasm::TrapDesc overflow);
+
+template <typename L>
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
+{
+ // Check for signed range because of as_addiu
+ // Check for unsigned range because of as_xori
+ if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
+ Label goodAddition;
+ as_addiu(rd, rs, imm.value);
+
+ // If different sign, no overflow
+ as_xori(ScratchRegister, rs, imm.value);
+ ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodAddition);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<Label*>(Register rd, Register rs,
+ Imm32 imm, Label* overflow);
+template void
+MacroAssemblerMIPS::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm,
+ wasm::TrapDesc overflow);
+
+// Subtract.
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
+{
+ Label goodSubtraction;
+ // The ma_b calls below compare against zero directly, so they do not
+ // clobber ScratchRegister, which carries the sign checks.
+ as_subu(rd, rs, rt);
+
+ as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
+ ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
+
+ // If different sign, then overflow
+ as_xor(ScratchRegister, rs, rd);
+ ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+ bind(&goodSubtraction);
+}
+
+// Memory.
+
+void
+MacroAssemblerMIPS::ma_load(Register dest, Address address,
+ LoadStoreSize size, LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
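+ // MIPS load/store immediates are signed 16-bit, so offsets outside
+ // [-32768, 32767] are materialized in ScratchRegister and either folded
+ // into the base or used via the Loongson indexed forms below.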
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset))
+ {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension)
+ as_lbu(dest, base, encodedOffset);
+ else
+ as_lb(dest, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension)
+ as_lhu(dest, base, encodedOffset);
+ else
+ as_lh(dest, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_lw(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_store(Register data, Address address, LoadStoreSize size,
+ LoadStoreExtension extension)
+{
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address, Register dest)
+{
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_sll(ScratchRegister, address.index, Imm32(shift));
+ as_addu(dest, address.base, ScratchRegister);
+ } else {
+ as_addu(dest, address.base, address.index);
+ }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void
+MacroAssemblerMIPS::ma_lw(Register data, Address address)
+{
+ ma_load(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, Address address)
+{
+ ma_store(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
+{
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sw(ScratchRegister, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(address.offset));
+ as_addu(SecondScratchReg, address.base, SecondScratchReg);
+ as_sw(ScratchRegister, SecondScratchReg, 0);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address)
+{
+ ma_store(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_pop(Register r)
+{
+ as_lw(r, StackPointer, 0);
+ as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void
+MacroAssemblerMIPS::ma_push(Register r)
+{
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+ as_sw(r, StackPointer, 0);
+}
+
+// Branch helpers used from within MIPS-specific code.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
+{
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
+{
+ ma_lw(SecondScratchReg, addr);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+{
+ if (label->bound()) {
+ // Generate the long jump for calls because the return address has to
+ // be the address after the reserved block.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for the long jump.
+ as_nop();
+ if (delaySlotFill == FillDelaySlot)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+ MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset))
+ jumpKind = ShortJump;
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Handle long conditional branch
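+ // The inverted branch's offset lands just past the five words emitted
+ // here: the branch itself, the patchable lui/ori pair (the lui fills
+ // the branch delay slot), the jr, and its delay-slot nop. When the
+ // original condition holds, execution falls through into the absolute
+ // jump.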
+ writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset());
+ ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+ // Second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ // Indicate that this is a short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+ // Leave space for a potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional)
+ as_nop();
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
+{
+ ma_lw(ScratchRegister, addr);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+void
+MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
+{
+ ma_lw(ScratchRegister, lhs);
+ ma_cmp_set(dst, ScratchRegister, rhs, c);
+}
+
+// fp instructions
+void
+MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
+{
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+ };
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+
+ // put the hi part of the 64-bit value into the odd register
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.hi));
+ moveToDoubleHi(ScratchRegister, dest);
+ }
+
+ // put the low part of the 64-bit value into the even register
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(intStruct.lo));
+ moveToDoubleLo(ScratchRegister, dest);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
+{
+ moveToDoubleLo(src.payloadReg(), dest);
+ moveToDoubleHi(src.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ls(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
+{
+ // Use single precision load instructions so we don't have to worry about
+ // alignment.
+
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
+ as_ls(ft, address.base, address.offset);
+ as_ls(getOddPair(ft), address.base, off2);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ls(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
+{
+ int32_t off2 = address.offset + TAG_OFFSET;
+ if (Imm16::IsInSignedRange(address.offset) && Imm16::IsInSignedRange(off2)) {
+ as_ss(ft, address.base, address.offset);
+ as_ss(getOddPair(ft), address.base, off2);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
+ as_ss(getOddPair(ft), ScratchRegister, TAG_OFFSET);
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
+{
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ss(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_addu(ScratchRegister, address.base, ScratchRegister);
+ as_ss(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void
+MacroAssemblerMIPS::ma_pop(FloatRegister fs)
+{
+ ma_ld(fs.doubleOverlay(0), Address(StackPointer, 0));
+ as_addiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void
+MacroAssemblerMIPS::ma_push(FloatRegister fs)
+{
+ as_addiu(StackPointer, StackPointer, -sizeof(double));
+ ma_sd(fs.doubleOverlay(0), Address(StackPointer, 0));
+}
+
+bool
+MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr)
+{
+ uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS,
+ ExitFrameLayout::Size());
+
+ asMasm().Push(Imm32(descriptor)); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+
+ return true;
+}
+
+void
+MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::move32(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(Register src, Register dest)
+{
+ ma_move(dest, src);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest)
+{
+ ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
+{
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm, Register dest)
+{
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest)
+{
+ ma_load(dest, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest)
+{
+ ma_load(dest, src, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address, Register dest)
+{
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPrivate(const Address& address, Register dest)
+{
+ ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const Address& address, FloatRegister dest)
+{
+ ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ld(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
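+ // An lwl/lwr pair assembles one word from an unaligned address: with the
+ // little-endian byte offsets used here, lwl at offset + 3 fills the upper
+ // bytes and lwr at offset fills the lower ones.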
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
+ as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+ moveToDoubleLo(temp, dest);
+ as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+ as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+ moveToDoubleHi(temp, dest);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+ moveToDoubleLo(temp, dest);
+ as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ moveToDoubleHi(temp, dest);
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest)
+{
+ loadFloat32(src, dest);
+ as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const Address& address, FloatRegister dest)
+{
+ ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_ls(dest, Address(SecondScratchReg, src.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
+ FloatRegister dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
+ as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+
+ moveToFloat32(temp, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Register src, const Address& address)
+{
+ ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Register src, const Address& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address)
+{
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, const Address& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address)
+{
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest)
+{
+ ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest)
+{
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address)
+{
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address)
+{
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void
+MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address)
+{
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm, Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address);
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const BaseIndex& address)
+{
+ ma_store(src, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
+{
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
+ as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
+ const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+
+ if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
+ moveFromDoubleHi(src, temp);
+ as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_addu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ moveFromDoubleLo(src, temp);
+ as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
+ moveFromDoubleHi(src, temp);
+ as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ }
+}
+
+// Note: this function clobbers the input register.
+void
+MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output)
+{
+ MOZ_ASSERT(input != ScratchDoubleReg);
+ Label positive, done;
+
+ // <= 0 or NaN --> 0
+ zeroDouble(ScratchDoubleReg);
+ branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive);
+ {
+ move32(Imm32(0), output);
+ jump(&done);
+ }
+
+ bind(&positive);
+
+ // Add 0.5 and truncate.
+ loadConstantDouble(0.5, ScratchDoubleReg);
+ addDouble(ScratchDoubleReg, input);
+
+ Label outOfRange;
+
+ branchTruncateDoubleMaybeModUint32(input, output, &outOfRange);
+ asMasm().branch32(Assembler::Above, output, Imm32(255), &outOfRange);
+ {
+ // Check if we had a tie.
+ convertInt32ToDouble(output, ScratchDoubleReg);
+ branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done);
+
+ // It was a tie. Mask out the ones bit to get an even value.
+ // See also js_TypedArray_uint8_clamp_double.
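+ // E.g. input 2.5: adding 0.5 gives 3.0, truncation gives 3, and 3
+ // converts back to 3.0 exactly, so the tie path clears bit 0 and
+ // produces 2 (round-half-to-even).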
+ and32(Imm32(~1), output);
+ jump(&done);
+ }
+
+ // > 255 --> 255
+ bind(&outOfRange);
+ {
+ move32(Imm32(255), output);
+ }
+
+ bind(&done);
+}
+
+// higher level tag testing code
+Operand
+MacroAssemblerMIPSCompat::ToPayload(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+Operand
+MacroAssemblerMIPSCompat::ToType(Operand base)
+{
+ return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::testNullSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
+}
+
+void
+MacroAssemblerMIPSCompat::testObjectSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
+}
+
+void
+MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
+}
+
+// unboxing code
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand, Register dest)
+{
+ if (operand.payloadReg() != dest)
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src, Register dest)
+{
+ computeScaledAddress(src, SecondScratchReg);
+ ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ moveToDoubleLo(operand.payloadReg(), dest);
+ moveToDoubleHi(operand.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const Address& src, FloatRegister dest)
+{
+ ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ moveToDoubleLo(ScratchRegister, dest);
+ ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+ moveToDoubleHi(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand, Register dest)
+{
+ ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src, Register dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest)
+{
+ ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src, AnyRegister dest)
+{
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else if (src.payloadReg() != dest.gpr()) {
+ ma_move(dest.gpr(), src.payloadReg());
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand& src, Register dest)
+{
+ ma_move(dest, src.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxDouble(FloatRegister src, const ValueOperand& dest)
+{
+ moveFromDoubleLo(src, dest.payloadReg());
+ moveFromDoubleHi(src, dest.typeReg());
+}
+
+void
+MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest)
+{
+ if (src != dest.payloadReg())
+ ma_move(dest.payloadReg(), src);
+ ma_li(dest.typeReg(), ImmType(type));
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest)
+{
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest)
+{
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(float f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest)
+{
+ ma_lis(dest, f);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src, FloatRegister dest)
+{
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ ma_ld(dest, src);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift)
+{
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ // First, recompute the address that was in the scratch register, since
+ // it was overwritten while loading the tag.
+ computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), SecondScratchReg);
+ loadDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(double dp, FloatRegister dest)
+{
+ ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+ struct DoubleStruct {
+ uint32_t lo;
+ uint32_t hi;
+ };
+ DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(d.bits());
+
+ // put the hi part of the 64-bit value into the odd register
+ if (intStruct.hi == 0) {
+ moveToDoubleHi(zero, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(intStruct.hi));
+ moveToDoubleHi(scratch, dest);
+ }
+
+ // put the low part of the 64-bit value into the even register
+ if (intStruct.lo == 0) {
+ moveToDoubleLo(zero, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(intStruct.lo));
+ moveToDoubleLo(scratch, dest);
+ }
+}
+
+Register
+MacroAssemblerMIPSCompat::extractObject(const Address& address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const Address& address, Register scratch)
+{
+ ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+ return scratch;
+}
+
+Register
+MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address, Register scratch)
+{
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+uint32_t
+MacroAssemblerMIPSCompat::getType(const Value& val)
+{
+ return val.toNunboxTag();
+}
+
+void
+MacroAssemblerMIPSCompat::moveData(const Value& val, Register data)
+{
+ if (val.isMarkable())
+ ma_li(data, ImmGCPtr(val.toMarkablePointer()));
+ else
+ ma_li(data, Imm32(val.toNunboxPayload()));
+}
+
+void
+MacroAssemblerMIPSCompat::moveValue(const Value& val, Register type, Register data)
+{
+ MOZ_ASSERT(type != data);
+ ma_li(type, Imm32(getType(val)));
+ moveData(val, data);
+}
+void
+MacroAssemblerMIPSCompat::moveValue(const Value& val, const ValueOperand& dest)
+{
+ moveValue(val, dest.typeReg(), dest.payloadReg());
+}
+
+/* There are three paths through the backedge jump. They are listed here in the order
+ * in which instructions are executed.
+ * - The short jump is simple:
+ * b offset # Jumps directly to target.
+ * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to loop header:
+ * b label1
+ * lui at, addr1_hi # In delay slot. We use the value in 'at' later.
+ * label1:
+ * ori at, addr1_lo
+ * jr at
+ * lui at, addr2_hi # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to interrupt loop:
+ * b label2
+ * lui at, addr1_hi # In delay slot. Don't care about 'at' here.
+ * label2:
+ * lui at, addr2_hi
+ * ori at, addr2_lo
+ * jr at
+ * nop # In delay slot.
+ *
+ * The backedge is done this way to avoid patching the lui+ori pair while it
+ * is being executed. See also jit::PatchBackedge().
+ */
+CodeOffsetJump
+MacroAssemblerMIPSCompat::backedgeJump(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+
+ // Backedges are short jumps when bound, but can become long when patched.
+ m_buffer.ensureSpace(8 * sizeof(uint32_t));
+ if (label->bound()) {
+ int32_t offset = label->offset() - bo.getOffset();
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ as_b(BOffImm16(offset));
+ } else {
+ // Jump to "label1" by default to jump to the loop header.
+ as_b(BOffImm16(2 * sizeof(uint32_t)));
+ }
+ // No need for a nop here; the next instruction can safely go in the delay slot.
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 3 * sizeof(uint32_t));
+ as_jr(ScratchRegister);
+ // No need for a nop here; the next instruction can safely go in the delay slot.
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 8 * sizeof(uint32_t));
+ return CodeOffsetJump(bo.getOffset());
+}
+
+CodeOffsetJump
+MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentation)
+{
+ // Only one branch per label.
+ MOZ_ASSERT(!label->used());
+ uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ BufferOffset bo = nextOffset();
+ label->use(bo.getOffset());
+ addLongJump(bo);
+ ma_liPatchable(ScratchRegister, Imm32(dest));
+ as_jr(ScratchRegister);
+ as_nop();
+ return CodeOffsetJump(bo.getOffset());
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
+{
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex& dest)
+{
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address& dest)
+{
+ ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest)
+{
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ ma_li(SecondScratchReg, Imm32(getType(val)));
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest)
+{
+ computeScaledAddress(dest, ScratchRegister);
+
+ // Make sure that ma_sw doesn't clobber ScratchRegister
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
+{
+ // Ensure that loading the payload does not erase the pointer to the
+ // Value in memory.
+ if (src.base != val.payloadReg()) {
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ } else {
+ ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+ ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+ }
+}
+
+void
+MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+ MOZ_ASSERT(payload != dest.typeReg());
+ ma_li(dest.typeReg(), ImmType(type));
+ if (payload != dest.payloadReg())
+ ma_move(dest.payloadReg(), payload);
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
+{
+ // Allocate stack slots for type and payload. One for each.
+ asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+ // Store type and payload.
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::pushValue(const Address& addr)
+{
+ // Allocate stack slots for type and payload. One for each.
+ ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ // Store type and payload.
+ ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+ ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
+ ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::popValue(ValueOperand val)
+{
+ // Load payload and type.
+ as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+ as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+ // Free stack.
+ as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest)
+{
+ moveData(val, SecondScratchReg);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
+{
+ ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(const Value& val, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+
+ moveData(val, ScratchRegister);
+
+ as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
+{
+ ma_li(SecondScratchReg, tag);
+ ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest)
+{
+ MOZ_ASSERT(dest.offset == 0);
+
+ computeScaledAddress(dest, SecondScratchReg);
+ ma_li(ScratchRegister, tag);
+ as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::breakpoint()
+{
+ as_break(0);
+}
+
+void
+MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure)
+{
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void
+MacroAssemblerMIPSCompat::checkStackAlignment()
+{
+#ifdef DEBUG
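+    // Trap with a break instruction if sp is not ABI-aligned.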
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void
+MacroAssemblerMIPSCompat::alignStackPointer()
+{
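+    // Keep a copy of the original sp, align sp down to the ABI boundary,
+    // and store the copy at the new top of stack so restoreStackPointer()
+    // can reload it.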
+ movePtr(StackPointer, SecondScratchReg);
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
+ storePtr(SecondScratchReg, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPSCompat::restoreStackPointer()
+{
+ loadPtr(Address(StackPointer, 0), StackPointer);
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+ if (framePushed() % ABIStackAlignment != 0) {
+ aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
+ reserveStack(aic.alignmentPadding);
+ } else {
+ aic.alignmentPadding = 0;
+ }
+ MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
+ checkStackAlignment();
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+ if (aic.alignmentPadding != 0)
+ freeStack(aic.alignmentPadding);
+}
+
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(void* handler)
+{
+ // Reserve space for exception information.
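+    // Round the reserved size up to a multiple of ABIStackAlignment.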
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+    ma_move(a0, StackPointer); // Use a0 since it is the first function argument.
+
+ // Call the handler.
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI(handler);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label return_;
+ Label bailout;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+ &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+ &return_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, load the new stack pointer
+ // and return from the entry frame.
+ bind(&entryFrame);
+ moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+
+    // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push
+ // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+ // exception.
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1, a2);
+ loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
+ loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
+
+ pushValue(BooleanValue(true));
+ pushValue(exception);
+ jump(a0);
+
+ // Only used in debug mode. Return BaselineFrame->returnValue() to the
+ // caller.
+ bind(&return_);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+ loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+ loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ ma_move(StackPointer, BaselineFrameReg);
+ pop(BaselineFrameReg);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to caller
+ // frame before returning.
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ profilerExitFrame();
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub.
+ bind(&bailout);
+ loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
+ ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+ loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
+ jump(a1);
+}
+
+template<typename T>
+void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register oldval, Register newval,
+ Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPSCompat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register oldval, Register newval, Register temp,
+ Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+template<typename T>
+void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output)
+{
+ switch (arrayType) {
+ case Scalar::Int8:
+ atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint8:
+ atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int16:
+ atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint16:
+ atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Int32:
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr());
+ break;
+ case Scalar::Uint32:
+ // At the moment, the code in MCallOptimize.cpp requires the output
+ // type to be double for uint32 arrays. See bug 1077305.
+ MOZ_ASSERT(output.isFloat());
+ atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp);
+ convertUInt32ToDouble(temp, output.fpu());
+ break;
+ default:
+ MOZ_CRASH("Invalid typed array type");
+ }
+}
+
+template void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+template void
+MacroAssemblerMIPSCompat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
+ Register value, Register temp, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+CodeOffset
+MacroAssemblerMIPSCompat::toggledJump(Label* label)
+{
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset
+MacroAssemblerMIPSCompat::toggledCall(JitCode* target, bool enabled)
+{
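+    // The sequence is four instructions long: a two-instruction patchable
+    // load of the target address, then either jalr+nop (enabled) or two
+    // nops (disabled), so ToggleCall() can rewrite it in place.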
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+}
+
+void
+MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr, Register scratch)
+{
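+    // Publish framePtr as the activation's lastProfilingFrame and clear the
+    // last profiling call site.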
+ AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation());
+ loadPtr(activation, scratch);
+ storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void
+MacroAssemblerMIPSCompat::profilerExitFrame()
+{
+ branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void
+MacroAssembler::subFromStackPtr(Imm32 imm32)
+{
+ if (imm32.value)
+ asMasm().subPtr(imm32, StackPointer);
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+void
+MacroAssembler::PushRegsInMask(LiveRegisterSet set)
+{
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ reserveStack(diffG);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diffG));
+ }
+ MOZ_ASSERT(diffG == 0);
+
+ // Double values have to be aligned. We reserve extra space so that we can
+ // start writing from the first aligned location.
+ // We reserve a whole extra double so that the buffer has even size.
+ ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
+ reserveStack(diffF + sizeof(double));
+
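+    // On the 32-bit FPU doubles live in even/odd register pairs, so only
+    // even-coded registers are stored, relative to the aligned base kept in
+    // SecondScratchReg.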
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+ if ((*iter).code() % 2 == 0)
+ as_sd(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ MOZ_ASSERT(diffF == 0);
+}
+
+void
+MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
+{
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+ int32_t diffF = set.fpus().getPushSizeInBytes();
+ const int32_t reservedG = diffG;
+ const int32_t reservedF = diffF;
+
+    // Read the buffer from the first aligned location.
+ ma_addu(SecondScratchReg, sp, Imm32(reservedF + sizeof(double)));
+ ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));
+
+ for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) {
+        // Use assembly l.d because we have aligned the stack.
+        if (!ignore.has(*iter) && ((*iter).code() % 2 == 0))
+            as_ld(*iter, SecondScratchReg, -diffF);
+ diffF -= sizeof(double);
+ }
+ freeStack(reservedF + sizeof(double));
+ MOZ_ASSERT(diffF == 0);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ if (!ignore.has(*iter))
+ loadPtr(Address(StackPointer, diffG), *iter);
+ }
+ freeStack(reservedG);
+ MOZ_ASSERT(diffG == 0);
+}
+
+// ===============================================================
+// ABI function calls.
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+ setupABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+    // Force sp to be ABIStackAlignment-aligned.
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
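+    // Save the original sp at the new top of stack so that callWithABIPost()
+    // can restore it.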
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+    // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+ ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+    // Save $ra because the call is going to clobber it. It is restored in
+    // callWithABIPost(). NOTE: this is needed for calls from SharedIC;
+    // maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+ if (!enoughMemory_)
+ return;
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+    // Load the callee into t9; no instruction between this move and the
+    // call may clobber it. Note that we can't call through fun directly
+    // because it may be one of the IntArg registers clobbered before the
+    // call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+    // Load the callee into t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, address, cond == Assembler::Equal ? &done : label);
+ loadPtr(address, temp);
+ branchPtrInNurseryChunk(cond, temp, InvalidReg, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+ Register temp, Label* label)
+{
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void
+MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label)
+{
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ moveData(rhs, scratch);
+
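+    // A nunboxed Value is equal only if both the payload and the type tag
+    // match.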
+ if (cond == Equal) {
+ Label done;
+ ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
+ {
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal);
+ }
+ bind(&done);
+ } else {
+ ma_b(lhs.payloadReg(), scratch, label, NotEqual);
+
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const T& dest, MIRType slotType)
+{
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag if needed.
+ if (valueType != slotType)
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
+
+ // Store the payload.
+ if (value.constant())
+ storePayload(value.value(), dest);
+ else
+ storePayload(value.reg().typedReg().gpr(), dest);
+}
+
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const Address& dest, MIRType slotType);
+template void
+MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType,
+ const BaseIndex& dest, MIRType slotType);
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
new file mode 100644
index 000000000..4c7618d08
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -0,0 +1,1021 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_h
+#define jit_mips32_MacroAssembler_mips32_h
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+struct ImmTag : public Imm32
+{
+ ImmTag(JSValueTag mask)
+ : Imm32(int32_t(mask))
+ { }
+};
+
+struct ImmType : public ImmTag
+{
+ ImmType(JSValueType type)
+ : ImmTag(JSVAL_TYPE_TO_TAG(type))
+ { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
+
+static const uint32_t LOW_32_MASK = (1LL << 32) - 1;
+static const int32_t LOW_32_OFFSET = 0;
+static const int32_t HIGH_32_OFFSET = 4;
+
+class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
+{
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+ void ma_li(Register dest, CodeOffset* label);
+
+ void ma_liPatchable(Register dest, Imm32 imm);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+ template <typename L>
+ void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
+
+ // subtract
+ void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+
+ // memory
+ // shortcut for when we know we're transferring 32 bits of data
+ void ma_lw(Register data, Address address);
+
+ void ma_sw(Register data, Address address);
+ void ma_sw(Imm32 imm, Address address);
+ void ma_sw(Register data, BaseIndex& address);
+
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+    // Branch helpers used from within MIPS-specific code.
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+ void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ {
+ ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeWord);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister fd, Address address);
+ void ma_ld(FloatRegister fd, Address address);
+ void ma_sd(FloatRegister fd, Address address);
+ void ma_ss(FloatRegister fd, Address address);
+
+ void ma_pop(FloatRegister fs);
+ void ma_push(FloatRegister fs);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
+ ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
+ void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+ ma_lw(ScratchRegister, lhs);
+ ma_li(SecondScratchReg, Imm32(uint32_t(imm.value)));
+ ma_cmp_set(dst, ScratchRegister, SecondScratchReg, c);
+ }
+
+    // These functions abstract access to the high part of a double-precision
+    // float register. They are intended to work with both 32-bit and 64-bit
+    // floating point coprocessors.
+    // :TODO: (Bug 985881) Modify this for N32 ABI to use mthc1 and mfhc1
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mtc1(src, getOddPair(dest));
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfc1(dest, getOddPair(src));
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
+{
+ public:
+ using MacroAssemblerMIPS::call;
+
+ MacroAssemblerMIPSCompat()
+ { }
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_addu(dest, address.base, Imm32(address.offset));
+ }
+
+ inline void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) {
+ ma_b(dest);
+ }
+
+ void mov(Register src, Register dest) {
+ as_ori(dest, src, 0);
+ }
+ void mov(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+ }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(Register src, Address dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+ void mov(Address src, Register dest) {
+ MOZ_CRASH("NYI-IC");
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() {
+ as_nop();
+ }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) {
+ ma_push(reg);
+ }
+ void push(FloatRegister reg) {
+ ma_push(reg);
+ }
+ void pop(Register reg) {
+ ma_pop(reg);
+ }
+ void pop(FloatRegister reg) {
+ ma_pop(reg);
+ }
+
+    // Emit a branch that can be toggled to a non-operation. On MIPS we use
+    // the "andi" instruction to toggle the branch.
+    // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset label = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void jump(Label* label) {
+ ma_b(label);
+ }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) {
+ branch(code);
+ }
+
+ void jump(wasm::TrapDesc target) {
+ ma_b(target);
+ }
+
+ void negl(Register reg) {
+ ma_negu(reg, reg);
+ }
+
+ // Returns the register containing the type tag.
+ Register splitTagForTest(const ValueOperand& value) {
+ return value.typeReg();
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest);
+ void unboxNonDouble(const Address& src, Register dest);
+ void unboxNonDouble(const BaseIndex& src, Register dest);
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); }
+ void unboxValue(const ValueOperand& src, AnyRegister dest);
+ void unboxPrivate(const ValueOperand& src, Register dest);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.payloadReg(), val.payloadReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ Register extractObject(const Address& address, Register scratch);
+ Register extractObject(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractInt32(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractBoolean(const ValueOperand& value, Register scratch) {
+ return value.payloadReg();
+ }
+ Register extractTag(const Address& address, Register scratch);
+ Register extractTag(const BaseIndex& address, Register scratch);
+ Register extractTag(const ValueOperand& value, Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& address, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index,
+ FloatRegister dest, int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+ void loadConstantDouble(wasm::RawF64 d, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+ void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest);
+
+ // higher level tag testing code
+ Operand ToPayload(Operand base);
+ Address ToPayload(Address base) {
+ return ToPayload(Operand(base)).toAddress();
+ }
+
+ protected:
+ Operand ToType(Operand base);
+ Address ToType(Address base) {
+ return ToType(Operand(base)).toAddress();
+ }
+
+ uint32_t getType(const Value& val);
+ void moveData(const Value& val, Register data);
+ public:
+ void moveValue(const Value& val, Register type, Register data);
+
+ CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr);
+ CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr);
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address, dest.fpu());
+ else
+ ma_lw(dest.gpr(), address);
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat())
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ else
+ load32(address, dest.gpr());
+ }
+
+ template <typename T>
+ void storeUnboxedValue(ConstantOrRegister value, MIRType valueType, const T& dest,
+ MIRType slotType);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) {
+ switch (nbytes) {
+ case 4:
+ store32(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default: MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(),
+ s1 = src.payloadReg(), d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ MOZ_ASSERT(d1 != ScratchRegister);
+ MOZ_ASSERT(d0 != ScratchRegister);
+ move32(d1, ScratchRegister);
+ move32(d0, d1);
+ move32(ScratchRegister, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ mozilla::Swap(s0, s1);
+ mozilla::Swap(d0, d1);
+ }
+
+ if (s0 != d0)
+ move32(s0, d0);
+ if (s1 != d1)
+ move32(s1, d1);
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+    void loadValue(Operand src, ValueOperand val) {
+        loadValue(src.toAddress(), val);
+    }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isMarkable())
+ push(ImmGCPtr(val.toMarkablePointer()));
+ else
+ push(Imm32(val.toNunboxPayload()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+ void pushValue(const Address& addr);
+
+ void storePayload(const Value& val, Address dest);
+ void storePayload(Register src, Address dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, Address dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(void* handler);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
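+    // Each helper below forwards to a shared implementation
+    // (compareExchange, atomicExchange, atomicFetchOp or atomicEffectOp),
+    // passing the access width in bytes and whether the result is
+    // sign-extended.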
+ template<typename T>
+ void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T>
+ void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T>
+ void atomicExchange32(const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAdd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAdd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchSub32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicSub8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicSub32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicAnd8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicAnd32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchOr32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicOr8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicOr32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T, typename S>
+ void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template<typename T, typename S>
+ void atomicFetchXor32(const S& value, const T& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp, Register output)
+ {
+ atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output);
+ }
+ template <typename T, typename S>
+ void atomicXor8(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor16(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+ template <typename T, typename S>
+ void atomicXor32(const T& value, const S& mem, Register flagTemp,
+ Register valueTemp, Register offsetTemp, Register maskTemp)
+ {
+ atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp);
+ }
+
+ template<typename T>
+ void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ template<typename T>
+ void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
+ Register temp, Register valueTemp, Register offsetTemp, Register maskTemp,
+ AnyRegister output);
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+ void load64(const Address& address, Register64 dest) {
+ load32(Address(address.base, address.offset + INT64LOW_OFFSET), dest.low);
+ int32_t highOffset = (address.offset < 0) ? -int32_t(INT64HIGH_OFFSET) : INT64HIGH_OFFSET;
+ load32(Address(address.base, address.offset + highOffset), dest.high);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
+ void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
+ void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+ void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ // NOTE: This will use second scratch on MIPS. Only ARM needs the
+ // implementation without second scratch.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
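+
+    // For example, on a little-endian target (assuming LOW_32_OFFSET is 0
+    // and HIGH_32_OFFSET is 4), storing Imm64(0x1122334455667788) writes
+    // 0x55667788 to [base + offset] and 0x11223344 to [base + offset + 4].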
+
+ template <typename T> void storePtr(ImmWord imm, T address);
+ template <typename T> void storePtr(ImmPtr imm, T address);
+ template <typename T> void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_movd(dest, src);
+ }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDoubleLo(zero, reg);
+ moveToDoubleHi(zero, reg);
+ }
+
+ static bool convertUInt64ToDoubleNeedsTemp();
+ void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp);
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ void alignStackPointer();
+ void restoreStackPointer();
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ public:
+ CodeOffset labelForPatch() {
+ return CodeOffset(nextOffset().getOffset());
+ }
+
+ void lea(Operand addr, Register dest) {
+ ma_addu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void ma_storeImm(Imm32 imm, const Address& addr) {
+ ma_sw(imm, addr);
+ }
+
+ BufferOffset ma_BoundsCheck(Register bounded) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(bounded, ImmWord(0));
+ return bo;
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest);
+ }
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
+ ma_addu(GlobalReg, Imm32(WasmGlobalRegBias));
+ }
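+
+    // Note (an assumption about intent; the bias constant is defined in the
+    // shared MIPS headers): offsetting GlobalReg by WasmGlobalRegBias lets
+    // accessors such as loadWasmGlobalPtr() above reach the global data area
+    // with a single signed 16-bit displacement.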
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_h */
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.cpp b/js/src/jit/mips32/MoveEmitter-mips32.cpp
new file mode 100644
index 000000000..7b5a8996f
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.cpp
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MoveEmitter-mips32.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void
+MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+    // There is a pattern of moves of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+            // Since it is uncertain whether the load will be aligned,
+            // fill both slots with the same value.
+ masm.storeFloat32(temp, cycleSlot(slotId, 0));
+ masm.storeFloat32(temp, cycleSlot(slotId, 4));
+ } else {
+ // Just always store the largest possible size.
+ masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId, 0));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
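+        // Fall through to the GENERAL case; on this 32-bit target both are
+        // handled identically.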
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0, 0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0, 0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void
+MoveEmitterMIPS::completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId)
+{
+    // There is a pattern of moves of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId, 0), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ uint32_t offset = 0;
+ if (from.floatReg().numAlignedAliased() == 1)
+ offset = sizeof(float);
+ masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId, 0), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
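+        // Fall through to the GENERAL case, as in breakCycle() above.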
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0, 0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0, 0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
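+
+// Putting the two halves together: for a general-register swap (A -> B),
+// (B -> A), breakCycle() spills B into cycleSlot(0, 0), the in-order move
+// then copies A into B, and completeCycle() reloads the spilled value from
+// cycleSlot(0, 0) into A.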
+
+void
+MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
+{
+ // Ensure that we can use ScratchDoubleReg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+            // Used for passing a double parameter in the a2,a3 register
+            // pair. Two moves are added for one double parameter by
+            // MacroAssembler::passABIArg().
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.moveFromDoubleLo(from.floatReg(), a2);
+ masm.moveFromDoubleHi(from.floatReg(), a3);
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+        // Used for passing a double parameter in the a2,a3 register pair.
+        // Two moves are added for one double parameter by
+        // MacroAssembler::passABIArg().
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.loadPtr(getAdjustedAddress(from), a2);
+ masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.h b/js/src/jit/mips32/MoveEmitter-mips32.h
new file mode 100644
index 000000000..8d8d1c0c1
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MoveEmitter_mips32_h
+#define jit_mips32_MoveEmitter_mips32_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS : public MoveEmitterMIPSShared
+{
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS(MacroAssembler& masm)
+ : MoveEmitterMIPSShared(masm)
+ { }
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MoveEmitter_mips32_h */
diff --git a/js/src/jit/mips32/SharedIC-mips32.cpp b/js/src/jit/mips32/SharedIC-mips32.cpp
new file mode 100644
index 000000000..9a9c85ac8
--- /dev/null
+++ b/js/src/jit/mips32/SharedIC-mips32.cpp
@@ -0,0 +1,177 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsiter.h"
+
+#include "jit/BaselineCompiler.h"
+#include "jit/BaselineIC.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Linker.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jsboolinlines.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+// ICBinaryArith_Int32
+
+bool
+ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ // Guard that R0 is an integer and R1 is an integer.
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+ masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
+
+    // Compute the result into R0. There is no need to explicitly unbox;
+    // R2's payloadReg serves as a scratch register.
+ Register scratchReg = R2.payloadReg();
+
+ // DIV and MOD need an extra non-volatile ValueOperand to hold R0.
+ AllocatableGeneralRegisterSet savedRegs(availableGeneralRegs(2));
+ savedRegs.set() = GeneralRegisterSet::Intersect(GeneralRegisterSet::NonVolatile(), savedRegs.set());
+
+ Label goodMul, divTest1, divTest2;
+    switch (op_) {
+ case JSOP_ADD:
+ // We know R0.typeReg() already contains the integer tag. No boxing
+ // required.
+ masm.ma_addTestOverflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_SUB:
+ masm.ma_subTestOverflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ case JSOP_MUL: {
+ masm.ma_mul_branch_overflow(scratchReg, R0.payloadReg(), R1.payloadReg(), &failure);
+
+ masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);
+
+ // Result is -0 if operands have different signs.
+ masm.as_xor(t8, R0.payloadReg(), R1.payloadReg());
+ masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+
+ masm.bind(&goodMul);
+ masm.move32(scratchReg, R0.payloadReg());
+ break;
+ }
+ case JSOP_DIV:
+ case JSOP_MOD: {
+        // Check for INT_MIN / -1; it results in a double.
+ masm.ma_b(R0.payloadReg(), Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(-1), &failure, Assembler::Equal, ShortJump);
+ masm.bind(&divTest1);
+
+        // Check for division by zero.
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::Equal, ShortJump);
+
+ // Check for 0 / X with X < 0 (results in -0).
+ masm.ma_b(R0.payloadReg(), Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R1.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&divTest2);
+
+ masm.as_div(R0.payloadReg(), R1.payloadReg());
+
+ if (op_ == JSOP_DIV) {
+ // Result is a double if the remainder != 0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
+ masm.as_mflo(scratchReg);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ } else {
+ Label done;
+ // If X % Y == 0 and X < 0, the result is -0.
+ masm.as_mfhi(scratchReg);
+ masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ masm.ma_b(R0.payloadReg(), Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ masm.bind(&done);
+ masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
+ }
+ break;
+ }
+ case JSOP_BITOR:
+        masm.as_or(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITXOR:
+        masm.as_xor(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_BITAND:
+        masm.as_and(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_LSH:
+        // MIPS uses only the 5 lowest bits of R1 as the shift amount.
+ masm.ma_sll(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_RSH:
+ masm.ma_sra(R0.payloadReg(), R0.payloadReg(), R1.payloadReg());
+ break;
+ case JSOP_URSH:
+ masm.ma_srl(scratchReg, R0.payloadReg(), R1.payloadReg());
+ if (allowDouble_) {
+ Label toUint;
+ masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);
+
+ // Move result and box for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ EmitReturnFromIC(masm);
+
+ masm.bind(&toUint);
+ masm.convertUInt32ToDouble(scratchReg, FloatReg1);
+ masm.boxDouble(FloatReg1, R0);
+ } else {
+ masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
+ // Move result for return.
+ masm.move32(scratchReg, R0.payloadReg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
+ }
+
+ EmitReturnFromIC(masm);
+
+    // Failure case: jump to the next stub.
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+
+ return true;
+}
+
+bool
+ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
+{
+ Label failure;
+ masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
+
+ switch (op) {
+ case JSOP_BITNOT:
+ masm.not32(R0.payloadReg());
+ break;
+ case JSOP_NEG:
+        // Guard against 0 and INT_MIN; negating either results in a double.
+ masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(INT32_MAX), &failure);
+
+ masm.neg32(R0.payloadReg());
+ break;
+ default:
+ MOZ_CRASH("Unexpected op");
+ return false;
+ }
+
+ EmitReturnFromIC(masm);
+
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips32/SharedICRegisters-mips32.h b/js/src/jit/mips32/SharedICRegisters-mips32.h
new file mode 100644
index 000000000..78c124d90
--- /dev/null
+++ b/js/src/jit/mips32/SharedICRegisters-mips32.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_SharedICRegisters_mips32_h
+#define jit_mips32_SharedICRegisters_mips32_h
+
+#include "jit/MacroAssembler.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register BaselineFrameReg = s5;
+static constexpr Register BaselineStackReg = sp;
+
+static constexpr ValueOperand R0(a3, a2);
+static constexpr ValueOperand R1(s7, s6);
+static constexpr ValueOperand R2(t7, t6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t5;
+
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_SharedICRegisters_mips32_h */
diff --git a/js/src/jit/mips32/Simulator-mips32.cpp b/js/src/jit/mips32/Simulator-mips32.cpp
new file mode 100644
index 000000000..ae2e9d4f3
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -0,0 +1,3519 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips32/Simulator-mips32.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+
+#include "jit/mips32/Assembler-mips32.h"
+#include "vm/Runtime.h"
+
+namespace js {
+namespace jit {
+
+static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
+static bool
+HaveSameSign(int32_t a, int32_t b)
+{
+ return ((a ^ b) >= 0);
+}
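+
+// For example, HaveSameSign(5, 7) and HaveSameSign(-5, -7) are true, while
+// HaveSameSign(-1, 1) is false: the xor of two int32_t values has its sign
+// bit set exactly when the operands' sign bits differ.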
+
+static uint32_t
+GetFCSRConditionBit(uint32_t cc)
+{
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
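+
+// The FCSR keeps condition code 0 in bit 23 and codes 1..7 in bits 25..31
+// (bit 24 is the FS bit), so e.g. GetFCSRConditionBit(2) == 26.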
+
+static const int32_t kRegisterskMaxValue = 0x7fffffff;
+static const int32_t kRegisterskMinValue = 0x80000000;
+
+// -----------------------------------------------------------------------------
+// Various constants for MIPS assembly.
+
+class SimInstruction
+{
+ public:
+ enum {
+ kInstrSize = 4,
+        // On MIPS the PC cannot actually be accessed directly. We behave as
+        // if it were always the address of the instruction currently being
+        // executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const {
+ return (instructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
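+
+    // For example, bits(25, 21) extracts the 5-bit rs field: the mask
+    // (2 << (25 - 21)) - 1 == 0x1f is applied after shifting the
+    // instruction bits right by 21.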
+
+ // Instruction type.
+ enum Type {
+ kRegisterType,
+ kImmediateType,
+ kJumpType,
+ kUnsupported = -1
+ };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline Opcode opcodeValue() const {
+ return static_cast<Opcode>(bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const {
+ return bits(FDShift + FDBits - 1, FDShift);
+ }
+
+ inline int fsValue() const {
+ return bits(FSShift + FSBits - 1, FSShift);
+ }
+
+ inline int ftValue() const {
+ return bits(FTShift + FTBits - 1, FTShift);
+ }
+
+ inline int frValue() const {
+ return bits(FRShift + FRBits - 1, FRShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline Opcode opcodeFieldRaw() const {
+ return static_cast<Opcode>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const {
+ return instructionBits() & RSMask;
+ }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ Opcode op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+    // Tell whether the instruction must not be used in a branch delay slot.
+    bool isForbiddenInBranchDelay() const;
+    // Tell whether the instruction 'links', e.g. jal, bal.
+    bool isLinkingInstruction() const;
+    // Tell whether the instruction is a break or a trap.
+    bool isTrap() const;
+
+ private:
+
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool
+SimInstruction::isForbiddenInBranchDelay() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ }
+}
+
+bool
+SimInstruction::isLinkingInstruction() const
+{
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool
+SimInstruction::isTrap() const
+{
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return true;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type
+SimInstruction::instructionType() const
+{
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_break:
+ case ff_sll:
+ case ff_srl:
+ case ff_sra:
+ case ff_sllv:
+ case ff_srlv:
+ case ff_srav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_multu:
+ case ff_div:
+ case ff_divu:
+ case ff_add:
+ case ff_addu:
+ case ff_sub:
+ case ff_subu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_clz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_ext:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+      // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_sb:
+ case op_sh:
+ case op_swl:
+ case op_sw:
+ case op_swr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ return kImmediateType;
+      // 26-bit immediate type instructions, e.g. j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ }
+ return kUnsupported;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * SimInstruction::kInstrSize;
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
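+
+// With kPageShift == 12 and kLineShift == 2, each CachePage models a 4096
+// byte page of simulated code split into 1024 four-byte lines, each line
+// tracked by one byte of validity_map_.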
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex>
+{
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache(Simulator* sim)
+ : Base(sim->cacheLock_)
+ , sim_(sim)
+ {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isNothing());
+#ifdef DEBUG
+ sim_->cacheLockHolder_ = mozilla::Some(ThisThread::GetId());
+#endif
+ }
+
+ ~AutoLockSimulatorCache() {
+ MOZ_ASSERT(sim_->cacheLockHolder_.isSome());
+#ifdef DEBUG
+ sim_->cacheLockHolder_.reset();
+#endif
+ }
+
+ private:
+ Simulator* const sim_;
+};
+
+bool Simulator::ICacheCheckingEnabled = false;
+
+int Simulator::StopSimAt = -1;
+
+Simulator*
+Simulator::Create(JSContext* cx)
+{
+ Simulator* sim = js_new<Simulator>();
+ if (!sim)
+ return nullptr;
+
+ if (!sim->init()) {
+ js_delete(sim);
+ return nullptr;
+ }
+
+ if (getenv("MIPS_SIM_ICACHE_CHECKS"))
+ Simulator::ICacheCheckingEnabled = true;
+
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ int64_t stopAt;
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim;
+}
+
+void
+Simulator::Destroy(Simulator* sim)
+{
+ js_delete(sim);
+}
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger
+{
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+
+ void stop(SimInstruction* instr);
+ void debug();
+    // Print all registers with nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ int32_t getRegisterValue(int regnum);
+ int32_t getFPURegisterValueInt(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void
+UNSUPPORTED()
+{
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void
+MipsDebugger::stop(SimInstruction* instr)
+{
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+ SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int32_t
+MipsDebugger::getRegisterValue(int regnum)
+{
+ if (regnum == kPCRegister)
+ return sim_->get_pc();
+ return sim_->getRegister(regnum);
+}
+
+int32_t
+MipsDebugger::getFPURegisterValueInt(int regnum)
+{
+ return sim_->getFpuRegister(regnum);
+}
+
+int64_t
+MipsDebugger::getFPURegisterValueLong(int regnum)
+{
+ return sim_->getFpuRegisterLong(regnum);
+}
+
+float
+MipsDebugger::getFPURegisterValueFloat(int regnum)
+{
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double
+MipsDebugger::getFPURegisterValueDouble(int regnum)
+{
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool
+MipsDebugger::getValue(const char* desc, int32_t* value)
+{
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%i", value) == 1;
+}
+
+bool
+MipsDebugger::setBreakpoint(SimInstruction* breakpc)
+{
+    // Check if a breakpoint can be set. If not, return without any side effects.
+ if (sim_->break_pc_ != nullptr)
+ return false;
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+    // We do not set the breakpoint instruction in the code itself here; it
+    // will be set when the debugger shell continues.
+    return true;
+}
+
+bool
+MipsDebugger::deleteBreakpoint(SimInstruction* breakpc)
+{
+ if (sim_->break_pc_ != nullptr)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void
+MipsDebugger::undoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+}
+
+void
+MipsDebugger::redoBreakpoints()
+{
+ if (sim_->break_pc_)
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+}
+
+void
+MipsDebugger::printAllRegs()
+{
+ int32_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d ", Registers::GetName(i), value, value);
+
+ if (i % 2)
+ printf("\n");
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%08x %10d ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%08x %10d\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%08x\n", value);
+}
+
+void
+MipsDebugger::printAllRegsIncludingFPU()
+{
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::RegisterIdLimit; i++) {
+ if (i & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i),
+ getFPURegisterValueDouble(i));
+ }
+ }
+}
+
+static char*
+ReadLine(const char* prompt)
+{
+ char* result = nullptr;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+            // fgets got an error. Just give up.
+            if (result)
+                js_free(result);
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+            // Since we read a newline, we are done reading the line. This
+            // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+            // Allocate the initial result and make room for the terminating '\0'.
+ result = (char*)js_malloc(len + 1);
+ if (!result)
+ return nullptr;
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = (char*)js_malloc(new_len);
+ if (!new_result)
+ return nullptr;
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result, offset * sizeof(char));
+ js_free(result);
+ result = new_result;
+ }
+ // Copy the newly read line into the result.
+ memcpy(result + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result;
+}
+
+static void
+DisassembleInstruction(uint32_t pc)
+{
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mipsel -mcpu=mips32r2 | "
+ "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd))
+ printf("Cannot disassemble instruction.\n");
+}
+
+void
+MipsDebugger::debug()
+{
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+                // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!(instr->isTrap()) ||
+ instr->instructionBits() == kCallRedirInstr) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int32_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fCode = FloatRegister::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fCode != FloatRegisters::Invalid) {
+ if (fCode & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode),
+ getFPURegisterValueDouble(fCode));
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) ||
+ (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint32_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value)))
+ printf("setting breakpoint failed\n");
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+                printf("No flags on MIPS!\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() -
+ 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address =
+ reinterpret_cast<SimInstruction*>(stop_pc +
+ SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1;
+ i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+                printf("  dump stack content (default: 10 words)\n");
+ printf("mem <address> [<words>]\n");
+                printf("  dump memory content (default: 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+                printf("    stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+                printf("    - The Simulator keeps track of how many times they\n");
+                printf("      are met. (See the info command.) Going over a\n");
+                printf("      disabled stop still increases its counter.\n");
+ printf(" Commands:\n");
+                printf("    stop info all/<code> : print info about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool
+AllOnOnePage(uintptr_t start, int size)
+{
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void
+Simulator::setLastDebuggerInput(char* input)
+{
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage*
+GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
+{
+ Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p)
+ return p->value();
+
+ CachePage* new_page = js_new<CachePage>();
+ if (!i_cache.add(p, page, new_page))
+ return nullptr;
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void
+FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size)
+{
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void
+FlushICacheLocked(Simulator::ICacheMap& i_cache, void* start_addr, size_t size)
+{
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
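+
+// A worked example of the rounding above: flushing 10 bytes starting at
+// 0x1006 gives intra_line == 2, so start drops to the line boundary 0x1004
+// and size grows to 12; ((12 - 1) | kLineMask) + 1 == 12 keeps the size a
+// whole number of 4-byte lines, invalidating the lines covering
+// 0x1004..0x100f.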
+
+static void
+CheckICacheLocked(Simulator::ICacheMap& i_cache, SimInstruction* instr)
+{
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->cachedData(offset),
+ SimInstruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber
+Simulator::ICacheHasher::hash(const Lookup& l)
+{
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool
+Simulator::ICacheHasher::match(const Key& k, const Lookup& l)
+{
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+void
+Simulator::FlushICache(void* start_addr, size_t size)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ Simulator* sim = Simulator::Current();
+ AutoLockSimulatorCache als(sim);
+ js::jit::FlushICacheLocked(sim->icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator()
+ : cacheLock_(mutexid::SimulatorCacheLock)
+{
+    // Set up simulator support first. Some of this information is needed to
+    // set up the architecture state.
+
+    // Note: allocation, and anything that depends on allocated memory, is
+    // deferred until init() in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ resume_pc_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++)
+ exceptions[i] = 0;
+
+ lastDebuggerInput_ = nullptr;
+
+ redirection_ = nullptr;
+}
+
+bool
+Simulator::init()
+{
+ if (!icache_.init())
+ return false;
+
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = static_cast<char*>(js_malloc(stackSize));
+ if (!stack_)
+ return false;
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = sim->redirection();
+ if (Simulator::ICacheCheckingEnabled)
+ FlushICacheLocked(sim->icache(), addressOfSwiInstruction(), SimInstruction::kInstrSize);
+ sim->setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ Simulator* sim = Simulator::Current();
+
+ AutoLockSimulatorCache als(sim);
+
+ Redirection* current = sim->redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection));
+ if (!redir) {
+ MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection",
+ __FILE__, __LINE__);
+ MOZ_CRASH();
+ }
+ new(redir) Redirection(nativeFunction, type, sim);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
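+
+// In short, a call out of simulated code never executes host instructions
+// inside the simulation loop: generated code is handed
+// addressOfSwiInstruction() as the callee, the simulator traps on the
+// embedded break instruction, recovers the owning Redirection via the
+// offsetof() arithmetic in FromSwiInstruction(), and then calls
+// nativeFunction() itself using the recorded ABI type.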
+
+Simulator::~Simulator()
+{
+ js_free(stack_);
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */ void*
+Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type)
+{
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator*
+Simulator::Current()
+{
+ return TlsPerThreadData.get()->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void
+Simulator::setRegister(int reg, int32_t value)
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void
+Simulator::setFpuRegister(int fpureg, int32_t value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void
+Simulator::setFpuRegisterFloat(int fpureg, float value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterFloat(int fpureg, int64_t value)
+{
+ setFpuRegister(fpureg, value & 0xffffffff);
+ setFpuRegister(fpureg + 1, value >> 32);
+}
+
+void
+Simulator::setFpuRegisterDouble(int fpureg, double value)
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void
+Simulator::setFpuRegisterDouble(int fpureg, int64_t value)
+{
+ setFpuRegister(fpureg, value & 0xffffffff);
+ setFpuRegister(fpureg + 1, value >> 32);
+}
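+
+// Both int64_t setters above rely on the even/odd register pairing MIPS32
+// uses for 64-bit FPU values: the low word lands in the even register and
+// the high word in its odd partner, so a double in f0 occupies f0 and f1.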
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t
+Simulator::getRegister(int reg) const
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double
+Simulator::getDoubleFromRegisterPair(int reg)
+{
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the registers_[] array into a double-precision
+ // floating-point value and return it.
+ memcpy(&dm_val, &registers_[reg], sizeof(dm_val));
+ return dm_val;
+}
+
+int32_t
+Simulator::getFpuRegister(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int64_t
+Simulator::getFpuRegisterLong(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<int64_t*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+float
+Simulator::getFpuRegisterFloat(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+double
+Simulator::getFpuRegisterDouble(int fpureg) const
+{
+ MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)
+ && ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+// Runtime FP routines take up to two double arguments and zero or one
+// integer argument. All are fetched here, from a0-a3 or f12 and f14.
+void
+Simulator::getFpArgs(double* x, double* y, int32_t* z)
+{
+ *x = getFpuRegisterDouble(12);
+ *y = getFpuRegisterDouble(14);
+ *z = getRegister(a2);
+}
+
+void
+Simulator::getFpFromStack(int32_t* stack, double* x)
+{
+ MOZ_ASSERT(stack);
+ MOZ_ASSERT(x);
+ memcpy(x, stack, sizeof(double));
+}
+
+void
+Simulator::setCallResultDouble(double result)
+{
+ setFpuRegisterDouble(f0, result);
+}
+
+void
+Simulator::setCallResultFloat(float result)
+{
+ setFpuRegisterFloat(f0, result);
+}
+
+void
+Simulator::setCallResult(int64_t res)
+{
+ setRegister(v0, static_cast<int32_t>(res));
+ setRegister(v1, static_cast<int32_t>(res >> 32));
+}
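+
+// Illustrative usage (not part of the build): a 64-bit runtime-call result
+// is split across the O32 return-register pair:
+//   setCallResult(INT64_C(0x1122334455667788));
+//   // getRegister(v0) == 0x55667788, getRegister(v1) == 0x11223344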
+
+// Helper functions for setting and testing the FCSR register's bits.
+void
+Simulator::setFCSRBit(uint32_t cc, bool value)
+{
+ if (value)
+ FCSR_ |= (1 << cc);
+ else
+ FCSR_ &= ~(1 << cc);
+}
+
+bool
+Simulator::testFCSRBit(uint32_t cc)
+{
+ return FCSR_ & (1 << cc);
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool
+Simulator::setFCSRRoundError(double original, double rounded)
+{
+ bool ret = false;
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ ret = true;
+ }
+
+ if (rounded > INT_MAX || rounded < INT_MIN) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ // The architecture reference is not entirely clear, but this appears required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
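+
+// Worked example (illustrative): 2147483648.0 (2^31) does not fit in an
+// int32, so setFCSRRoundError(2147483648.0, 2147483648.0) sets the overflow
+// and invalid-operation bits and returns true; callers then substitute
+// kFPUInvalidResult for the destination register.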
+
+// Raw access to the PC register.
+void
+Simulator::set_pc(int32_t value)
+{
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool
+Simulator::has_bad_pc() const
+{
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t
+Simulator::get_pc() const
+{
+ return registers_[pc];
+}
+
+// MIPS cannot do unaligned reads and writes. On some MIPS platforms an
+// unaligned access raises an interrupt; on others it performs an
+// implementation-defined rotation. For now we simply disallow unaligned
+// reads, but at some point we may want to emulate the rotate behaviour.
+// Note that in simulator runs the runtime system executes directly on the
+// host and only generated code runs in the simulator; since the host is
+// typically IA32, we will not get MIPS-like behaviour on unaligned accesses.
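+//
+// Example (illustrative): with kPointerAlignment == 4, readW(0x1000, instr)
+// performs the load, while readW(0x1002, instr) reports an unaligned read
+// and crashes the simulator.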
+
+int
+Simulator::readW(uint32_t addr, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeW(uint32_t addr, int value, SimInstruction* instr)
+{
+ if (addr < 0x400) {
+ // This has to be a NULL-dereference, drop into debugger.
+ printf("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double
+Simulator::readD(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeD(uint32_t addr, double value, SimInstruction* instr)
+{
+ if ((addr & kDoubleAlignmentMask) == 0) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint16_t
+Simulator::readHU(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t
+Simulator::readH(uint32_t addr, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void
+Simulator::writeH(uint32_t addr, uint16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void
+Simulator::writeH(uint32_t addr, int16_t value, SimInstruction* instr)
+{
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t
+Simulator::readBU(uint32_t addr)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int32_t
+Simulator::readB(uint32_t addr)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void
+Simulator::writeB(uint32_t addr, uint8_t value)
+{
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void
+Simulator::writeB(uint32_t addr, int8_t value)
+{
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uintptr_t
+Simulator::stackLimit() const
+{
+ return stackLimit_;
+}
+
+uintptr_t*
+Simulator::addressOfStackLimit()
+{
+ return &stackLimit_;
+}
+
+bool
+Simulator::overRecursed(uintptr_t newsp) const
+{
+ if (newsp == 0)
+ newsp = getRegister(sp);
+ return newsp <= stackLimit();
+}
+
+bool
+Simulator::overRecursedWithExtra(uint32_t extra) const
+{
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void
+Simulator::format(SimInstruction* instr, const char* format)
+{
+ printf("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: with the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int32_t arg0);
+typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
+typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
+typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3);
+typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4);
+typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5);
+typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6);
+typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5, int32_t arg6, int32_t arg7);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int32_t arg0);
+typedef int32_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int64_Double)(double arg0);
+typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1, int32_t arg2);
+typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1, int32_t arg2,
+ int32_t arg3);
+typedef float (*Prototype_Float32_Float32)(float arg0);
+
+typedef double (*Prototype_DoubleInt)(double arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);
+
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
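+
+// For example (illustrative): a hypothetical native int32_t f(double)
+// redirected via RedirectNativeFunction(f, Args_Int_Double) is invoked below
+// through Prototype_Int_Double, with its argument read from f12 by getFpArgs().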
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void
+Simulator::softwareInterrupt(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_O32_ABI)
+ MOZ_CRASH("Only O32 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = getRegister(a0);
+ int32_t arg1 = getRegister(a1);
+ int32_t arg2 = getRegister(a2);
+ int32_t arg3 = getRegister(a3);
+
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(getRegister(sp));
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ int32_t arg4 = stack_pointer[4];
+ int32_t arg5 = stack_pointer[5];
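+ // (O32: the caller reserves four words at sp for a0-a3, so with a 4-byte
+ // word size these loads read sp + 16 and sp + 20.)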
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_ra = getRegister(ra);
+
+ intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target = reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target = reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target = reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target = reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target = reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target = reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target = reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target = reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[6];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target = reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target = reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target = reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int32_t res = target(dval, arg2, arg3);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDoubleIntInt target = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int32_t res = target(arg0, dval, arg4, arg5);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target = reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target = reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target = reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_DoubleInt target = reinterpret_cast<Prototype_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Double_IntDouble target = reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDouble target = reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // The last argument is on the stack.
+ getFpFromStack(stack_pointer + 4, &dval2);
+ Prototype_Double_DoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // The last two arguments are on the stack.
+ getFpFromStack(stack_pointer + 4, &dval2);
+ getFpFromStack(stack_pointer + 6, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool
+Simulator::isWatchpoint(uint32_t code)
+{
+ return (code <= kMaxWatchpointCode);
+}
+
+void
+Simulator::printWatchpoint(uint32_t code)
+{
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void
+Simulator::handleStop(uint32_t code, SimInstruction* instr)
+{
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool
+Simulator::isStopInstruction(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = static_cast<uint32_t>(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+bool
+Simulator::isEnabledStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void
+Simulator::enableStop(uint32_t code)
+{
+ if (!isEnabledStop(code))
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+void
+Simulator::disableStop(uint32_t code)
+{
+ if (isEnabledStop(code))
+ watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+void
+Simulator::increaseStopCounter(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n", code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void
+Simulator::printStopInfo(uint32_t code)
+{
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+void
+Simulator::signalExceptions()
+{
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0)
+ MOZ_CRASH("Error: Exception raised.");
+ }
+}
+
+// Handle execution based on instruction types.
+void
+Simulator::configureTypeRegister(SimInstruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt)
+{
+ // The instruction fields decoded below are const; any values that must
+ // flow back to decodeTypeRegister are returned through the reference
+ // parameters instead.
+
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Handled in DecodeTypeImmed, should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = rt << sa;
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of bits.
+ // The rs field is always equal to 0.
+ alu_out = rt_u >> sa;
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+ // is a special case of the SRL instruction, added in MIPS32
+ // Release 2. The rs field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
+ break;
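+ // Example (illustrative): for ROTR with sa == 8 and rt_u == 0x12345678,
+ // alu_out == (0x12345678 >> 8) | (0x12345678 << 24) == 0x78123456.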
+ case ff_sra:
+ alu_out = rt >> sa;
+ break;
+ case ff_sllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+ // Regular logical right shift of a word by a variable number of
+ // bits. The sa field is always equal to 0.
+ alu_out = rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+ // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. The sa field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
+ break;
+ case ff_srav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+ break;
+ case ff_multu:
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+ break;
+ case ff_add:
+ if (HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - rt);
+ }
+ }
+ alu_out = rs + rt;
+ break;
+ case ff_addu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ if (!HaveSameSign(rs, rt)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue + rt);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue + rt);
+ }
+ }
+ alu_out = rs - rt;
+ break;
+ case ff_subu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = rs_u < rt_u ? 1 : 0;
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = rs_u >= rt_u;
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = rs_u < rt_u;
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ case ff_divu:
+ // div and divu never raise exceptions.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = rs_u ? __builtin_clz(rs_u) : 32;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case ff_ext: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
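+ // Example (illustrative): EXT with lsb == 4 and msb == 7 gives size == 8
+ // and extracts bits 11..4 of rs: for rs_u == 0xabcd, alu_out == 0xbc.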
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void
+Simulator::decodeTypeRegister(SimInstruction* instr)
+{
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int32_t rs = getRegister(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->rtValue();
+ const int32_t rt = getRegister(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr,
+ alu_out,
+ i64hilo,
+ u64hilo,
+ next_pc,
+ return_addr_reg,
+ do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ MOZ_CRASH();
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ FPUregisters_[fs_reg] = registers_[rt_reg];
+ break;
+ case rs_mthc1:
+ MOZ_CRASH();
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round float to word (round half to even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
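+ // Worked example (illustrative): fs_value == 2.5 rounds to 3, and since
+ // 3 is odd and 3 - 2.5 == 0.5 the result is decremented to the even
+ // value 2, matching round-to-nearest-even.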
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round float to word towards positive infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: { // Mips32r2: Truncate float to 64-bit long-word.
+ float rounded = truncf(fs_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips32r2 instruction.
+ float rounded =
+ fs_value > 0 ? std::floor(fs_value + 0.5) : std::ceil(fs_value - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips32r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::floor(fs_value));
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::ceil(fs_value));
+ setFpuRegisterFloat(fd_reg, i64);
+ break;
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ dt_value = getFpuRegisterDouble(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round double to word (round half to even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards 0).
+ double rounded = trunc(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegister(fd_reg, result);
+ if (setFCSRRoundError(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt: { // Mips32r2: Truncate double to 64-bit long-word.
+ double rounded = trunc(ds_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips32r2 instruction.
+ double rounded = trunc(ds_value);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips32r2 instruction.
+ double rounded =
+ ds_value > 0 ? std::floor(ds_value + 0.5) : std::ceil(ds_value - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::floor(ds_value));
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(std::ceil(ds_value));
+ setFpuRegisterDouble(fd_reg, i64);
+ break;
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(alu_out));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ alu_out = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(alu_out));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt: // Mips32r2 instruction.
+ // Watch the signs here: we combine two 32-bit values
+ // into a signed 64-bit value.
+ i64 = static_cast<uint32_t>(getFpuRegister(fs_reg));
+ i64 |= static_cast<int64_t>(getFpuRegister(fs_reg + 1)) << 32;
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ case ff_cvt_s_fmt:
+ MOZ_CRASH();
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ setRegister(return_addr_reg, current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
+ case ff_multu:
+ setRegister(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ setRegister(HI, static_cast<int32_t>(u64hilo >> 32));
+ break;
+ case ff_div:
+ // Divide by zero and overflow was not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0
+ // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+ // return INT_MIN which is what the hardware does.
+ if (rs == INT_MIN && rt == -1) {
+ setRegister(LO, INT_MIN);
+ setRegister(HI, 0);
+ } else if (rt != 0) {
+ setRegister(LO, rs / rt);
+ setRegister(HI, rs % rt);
+ }
+ break;
+ case ff_divu:
+ if (rt_u != 0) {
+ setRegister(LO, rs_u / rt_u);
+ setRegister(HI, rs_u % rt_u);
+ }
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt) setRegister(rd_reg, rs);
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ } else {
+ if (!testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt) setRegister(rd_reg, rs);
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in common
+ // cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ }
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void
+Simulator::decodeTypeImmediate(SimInstruction* instr)
+{
+ // Instruction fields.
+ Opcode op = instr->opcodeFieldRaw();
+ int32_t rs = getRegister(instr->rsValue());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int32_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc.
+ int32_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int32_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ default:
+ break;
+ }
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we don't
+ // need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] =
+ rs < (kRegisterskMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case op_addiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (oe_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwl: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lwr: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
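+ // (Illustrative: an unaligned word load is assembled from an LWL/LWR
+ // pair; each instruction merges the bytes it can reach from the aligned
+ // word with the untouched bytes already in rt.)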
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, static_cast<int8_t>(rt));
+ break;
+ case op_sh:
+ writeH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_sw:
+ writeW(addr, rt, instr);
+ break;
+ case op_swr:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegister(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ addr = rs + se_imm16;
+ writeW(addr, getFpuRegister(ft_reg), instr);
+ break;
+ case op_sdc1:
+ addr = rs + se_imm16;
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ }
+
+
+ if (execute_branch_delay_instruction) {
+ // Execute the branch delay slot.
+ // We don't check for end_sim_pc. First, it should not be reached while the
+ // current pc is valid. Second, a branch should always execute its delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra)
+ set_pc(next_pc);
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void
+Simulator::decodeTypeJump(SimInstruction* instr)
+{
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int32_t pc_high_bits = current_pc & 0xf0000000;
+ // Next pc.
+ int32_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
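+ // Example (illustrative): with current_pc == 0x40001234 and
+ // imm26 == 0x12345, pc_high_bits == 0x40000000 and
+ // next_pc == 0x40000000 | (0x12345 << 2) == 0x40048d14.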
+
+ // Execute branch delay slot.
+ // We don't check for end_sim_pc. First, it should not be reached while the
+ // current pc is valid. Second, a jump should always execute its delay slot.
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction())
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void
+Simulator::instructionDecode(SimInstruction* instr)
+{
+ if (Simulator::ICacheCheckingEnabled) {
+ AutoLockSimulatorCache als(this);
+ CheckICacheLocked(icache(), instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_)
+ setRegister(pc, reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+}
+
+void
+Simulator::branchDelayInstructionDecode(SimInstruction* instr)
+{
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+ MOZ_CRASH("Eror:Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+template<bool enableStopSimAt>
+void
+Simulator::execute()
+{
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+ WasmActivation* activation = TlsPerThreadData.get()->runtimeFromMainThread()->wasmActivationStack();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ SimInstruction* instr = reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+
+ int32_t rpc = resume_pc_;
+ if (MOZ_UNLIKELY(rpc != 0)) {
+ // wasm signal handler ran and we have to adjust the pc.
+ activation->setResumePC((void*)get_pc());
+ set_pc(rpc);
+ resume_pc_ = 0;
+ }
+ }
+ program_counter = get_pc();
+ }
+}
+
+void
+Simulator::callInternal(uint8_t* entry)
+{
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int32_t>(entry));
+ // Put down a marker for the end of simulation. The simulator will stop
+ // simulating when the PC reaches this value. By saving the "end simulation"
+ // value into ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+ // Remember the values of the callee-saved registers; on MIPS O32 these
+ // are s0-s7 plus gp, sp, and fp.
+ int32_t s0_val = getRegister(s0);
+ int32_t s1_val = getRegister(s1);
+ int32_t s2_val = getRegister(s2);
+ int32_t s3_val = getRegister(s3);
+ int32_t s4_val = getRegister(s4);
+ int32_t s5_val = getRegister(s5);
+ int32_t s6_val = getRegister(s6);
+ int32_t s7_val = getRegister(s7);
+ int32_t gp_val = getRegister(gp);
+ int32_t sp_val = getRegister(sp);
+ int32_t fp_val = getRegister(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1)
+ execute<true>();
+ else
+ execute<false>();
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int32_t
+Simulator::call(uint8_t* entry, int argument_count, ...)
+{
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount)
+ entry_stack = entry_stack - argument_count * sizeof(int32_t);
+ else
+ entry_stack = entry_stack - kCArgsSlotsSize;
+
+ entry_stack &= ~(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg))
+ setRegister(argReg.code(), va_arg(parameters, int32_t));
+ else
+ stack_argument[i] = va_arg(parameters, int32_t);
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int32_t result = getRegister(v0);
+ return result;
+}
+
+uintptr_t
+Simulator::pushAddress(uintptr_t address)
+{
+ int new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t
+Simulator::popAddress()
+{
+ int current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator*
+JSRuntime::simulator() const
+{
+ return simulator_;
+}
+
+js::jit::Simulator*
+js::PerThreadData::simulator() const
+{
+ return runtime_->simulator();
+}
+
+uintptr_t*
+JSRuntime::addressOfSimulatorStackLimit()
+{
+ return simulator_->addressOfStackLimit();
+}
diff --git a/js/src/jit/mips32/Simulator-mips32.h b/js/src/jit/mips32/Simulator-mips32.h
new file mode 100644
index 000000000..96986dd9b
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -0,0 +1,424 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips32_Simulator_mips32_h
+#define jit_mips32_Simulator_mips32_h
+
+#ifdef JS_SIMULATOR_MIPS32
+
+#include "jit/IonTypes.h"
+#include "threading/Thread.h"
+#include "vm/MutexIDs.h"
+
+namespace js {
+namespace jit {
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+const intptr_t kPointerAlignment = 4;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
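+// That is, every FCSR flag bit above except the inexact flag.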
+
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
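+//
+// A sketch of the classification these bounds imply (illustrative only):
+//
+//   if (code <= kMaxWatchpointCode)   // watchpoint: print registers, continue
+//   else if (code <= kMaxStopCode)    // stop(): can be enabled and disabled
+//   else                              // plain break: drop into the debugger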
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+class Simulator {
+ friend class Redirection;
+ friend class MipsDebugger;
+ friend class AutoLockSimulatorCache;
+ public:
+
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+    f12, f13, f14, f15, // f12 and f14 are argument FPURegisters.
+ f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+ f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create(JSContext* cx);
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+    // Accessors for register state. Reading the pc value adheres to the MIPS
+    // architecture specification and is off by 8 from the currently executing
+    // instruction.
+ void setRegister(int reg, int32_t value);
+ int32_t getRegister(int reg) const;
+ double getDoubleFromRegisterPair(int reg);
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterFloat(int fpureg, int64_t value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ void setFpuRegisterDouble(int fpureg, int64_t value);
+ int32_t getFpuRegister(int fpureg) const;
+ int64_t getFpuRegisterLong(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(get_pc()); }
+
+ void set_resume_pc(void* value) {
+ resume_pc_ = int32_t(value);
+ }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template<bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int32_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+ // ICache checking.
+ static void FlushICache(void* start, size_t size);
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+        // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+    // Unsupported instructions use format() to print an error and stop execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint32_t readBU(uint32_t addr);
+ inline int32_t readB(uint32_t addr);
+ inline void writeB(uint32_t addr, uint8_t value);
+ inline void writeB(uint32_t addr, int8_t value);
+
+ inline uint16_t readHU(uint32_t addr, SimInstruction* instr);
+ inline int16_t readH(uint32_t addr, SimInstruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void writeH(uint32_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint32_t addr, int16_t value, SimInstruction* instr);
+
+ inline int readW(uint32_t addr, SimInstruction* instr);
+ inline void writeW(uint32_t addr, int value, SimInstruction* instr);
+
+ inline double readD(uint32_t addr, SimInstruction* instr);
+ inline void writeD(uint32_t addr, double value, SimInstruction* instr);
+
+ // Executing is handled based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static bool ICacheCheckingEnabled;
+
+ static int StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction, ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle arguments and return value for runtime FP functions.
+ void getFpArgs(double* x, double* y, int32_t* z);
+ void getFpFromStack(int32_t* stack, double* x);
+
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int32_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int icount_;
+ int break_count_;
+
+ int32_t resume_pc_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+    // instruction, if bit 31 of watchedStops_[code].count_ is unset.
+    // The value watchedStops_[code].count_ & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
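+    // Sketch (illustrative) of the encoding described above:
+    //   enabled(code)  == !(watchedStops_[code].count_ & kStopDisabledBit)
+    //   hitCount(code) ==   watchedStops_[code].count_ & ~kStopDisabledBit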
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ private:
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+#ifdef DEBUG
+ mozilla::Maybe<Thread::Id> cacheLockHolder_;
+#endif
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return icache_;
+ }
+
+ Redirection* redirection() const {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ return redirection_;
+ }
+
+ void setRedirection(js::jit::Redirection* redirection) {
+ MOZ_ASSERT(cacheLockHolder_.isSome());
+ redirection_ = redirection;
+ }
+};
+
+#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \
+ JS_BEGIN_MACRO \
+ if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \
+ js::ReportOverRecursed(cx); \
+ onerror; \
+ } \
+ JS_END_MACRO
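+
+// Usage sketch (illustrative):
+//
+//   JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, 1024, return false);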
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS32 */
+
+#endif /* jit_mips32_Simulator_mips32_h */
diff --git a/js/src/jit/mips32/Trampoline-mips32.cpp b/js/src/jit/mips32/Trampoline-mips32.cpp
new file mode 100644
index 000000000..d422ed757
--- /dev/null
+++ b/js/src/jit/mips32/Trampoline-mips32.cpp
@@ -0,0 +1,1418 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jscompartment.h"
+
+#include "jit/Bailouts.h"
+#include "jit/JitCompartment.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/Linker.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/mips32/Bailouts-mips32.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
+
+struct EnterJITRegs
+{
+ double f30;
+ double f28;
+ double f26;
+ double f24;
+ double f22;
+ double f20;
+
+ // empty slot for alignment
+ uintptr_t align;
+
+ // non-volatile registers.
+ uintptr_t ra;
+ uintptr_t s7;
+ uintptr_t s6;
+ uintptr_t s5;
+ uintptr_t s4;
+ uintptr_t s3;
+ uintptr_t s2;
+ uintptr_t s1;
+ uintptr_t s0;
+};
+
+struct EnterJITArgs
+{
+    // First 4 argument placeholders
+ void* jitcode; // <- sp points here when function is entered.
+ int maxArgc;
+ Value* maxArgv;
+ InterpreterFrame* fp;
+
+ // Arguments on stack
+ CalleeToken calleeToken;
+ JSObject* scopeChain;
+ size_t numStackValues;
+ Value* vp;
+};
+
+static void
+GenerateReturn(MacroAssembler& masm, int returnCode)
+{
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s0)), s0);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s1)), s1);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s2)), s2);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s3)), s3);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s4)), s4);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s5)), s5);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s6)), s6);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, s7)), s7);
+ masm.loadPtr(Address(StackPointer, offsetof(EnterJITRegs, ra)), ra);
+
+ // Restore non-volatile floating point registers
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f20)), f20);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f22)), f22);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f24)), f24);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f26)), f26);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f28)), f28);
+ masm.loadDouble(Address(StackPointer, offsetof(EnterJITRegs, f30)), f30);
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void
+GeneratePrologue(MacroAssembler& masm)
+{
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.reserveStack(sizeof(EnterJITRegs));
+ masm.storePtr(s0, Address(StackPointer, offsetof(EnterJITRegs, s0)));
+ masm.storePtr(s1, Address(StackPointer, offsetof(EnterJITRegs, s1)));
+ masm.storePtr(s2, Address(StackPointer, offsetof(EnterJITRegs, s2)));
+ masm.storePtr(s3, Address(StackPointer, offsetof(EnterJITRegs, s3)));
+ masm.storePtr(s4, Address(StackPointer, offsetof(EnterJITRegs, s4)));
+ masm.storePtr(s5, Address(StackPointer, offsetof(EnterJITRegs, s5)));
+ masm.storePtr(s6, Address(StackPointer, offsetof(EnterJITRegs, s6)));
+ masm.storePtr(s7, Address(StackPointer, offsetof(EnterJITRegs, s7)));
+ masm.storePtr(ra, Address(StackPointer, offsetof(EnterJITRegs, ra)));
+
+ masm.as_sd(f20, StackPointer, offsetof(EnterJITRegs, f20));
+ masm.as_sd(f22, StackPointer, offsetof(EnterJITRegs, f22));
+ masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30));
+}
+
+
+/*
+ * This method generates a trampoline for a C++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ * CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
+ *   ...using the standard EABI calling convention.
+ */
+JitCode*
+JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
+{
+ const Register reg_code = a0;
+ const Register reg_argc = a1;
+ const Register reg_argv = a2;
+ const mozilla::DebugOnly<Register> reg_frame = a3;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ MacroAssembler masm(cx);
+ GeneratePrologue(masm);
+
+ const Address slotToken(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
+ const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
+
+ // Save stack pointer into s4
+ masm.movePtr(StackPointer, s4);
+
+ // Load calleeToken into s2.
+ masm.loadPtr(slotToken, s2);
+
+ // Save stack pointer as baseline frame.
+ if (type == EnterJitBaseline)
+ masm.movePtr(StackPointer, BaselineFrameReg);
+
+ // Load the number of actual arguments into s3.
+ masm.loadPtr(slotVp, s3);
+ masm.unboxInt32(Address(s3, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+    // If we are constructing, that also needs to include newTarget.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, s2, Imm32(CalleeToken_FunctionConstructing),
+ &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+ masm.as_sll(s0, reg_argc, 3); // s0 = argc * 8
+ masm.addPtr(reg_argv, s0); // s0 = argv + argc * 8
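+    // (On this 32-bit target a js::Value occupies 8 bytes, hence argc * 8.)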
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6, s7);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments
+ masm.storePtr(s2, Address(StackPointer, 0)); // callee token
+
+ masm.subPtr(StackPointer, s4);
+ masm.makeFrameDescriptor(s4, JitFrame_Entry, JitFrameLayout::Size());
+ masm.push(s4); // descriptor
+
+ CodeLabel returnLabel;
+ CodeLabel oomReturnLabel;
+ if (type == EnterJitBaseline) {
+ // Handle OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ regs.take(OsrFrameReg);
+ regs.take(BaselineFrameReg);
+ regs.take(reg_code);
+ regs.take(ReturnReg);
+
+ const Address slotNumStackValues(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, numStackValues));
+ const Address slotScopeChain(BaselineFrameReg, sizeof(EnterJITRegs) +
+ offsetof(EnterJITArgs, scopeChain));
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register scratch = regs.takeAny();
+
+ Register numStackValues = regs.takeAny();
+ masm.load32(slotNumStackValues, numStackValues);
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, returnLabel.patchAt());
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = BaselineFrameReg;
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+ masm.movePtr(StackPointer, framePtr);
+
+ // Reserve space for locals and stack values.
+ masm.ma_sll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch);
+ masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size());
+
+ // Push frame descriptor and fake return address.
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(zero, Address(StackPointer, 0)); // fake return address
+
+ // No GC things to mark, push a bare token.
+ masm.enterFakeExitFrame(ExitFrameLayoutBareToken);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(BaselineFrameReg); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr));
+
+ regs.add(OsrFrameReg);
+ regs.take(JSReturnOperand);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ Register realFramePtr = numStackValues;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.ma_addu(realFramePtr, framePtr, Imm32(sizeof(void*)));
+ masm.profilerEnterFrame(realFramePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.ma_li(scratch, oomReturnLabel.patchAt());
+ masm.jump(scratch);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.loadPtr(slotScopeChain, R1.scratchReg());
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ if (type == EnterJitBaseline) {
+ // Baseline OSR will return here.
+ masm.bind(returnLabel.target());
+ masm.addCodeLabel(returnLabel);
+ masm.bind(oomReturnLabel.target());
+ masm.addCodeLabel(oomReturnLabel);
+ }
+
+ // Pop arguments off the stack.
+ // s0 <- 8*argc (size of all arguments we pushed on the stack)
+ masm.pop(s0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0);
+ masm.addPtr(s0, StackPointer);
+
+ // Store the returned value into the slotVp
+ masm.loadPtr(slotVp, s1);
+ masm.storeValue(JSReturnOperand, Address(s1, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+
+ Linker linker(masm);
+ AutoFlushICache afc("GenerateEnterJIT");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "EnterJIT");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateInvalidator(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ // NOTE: Members ionScript_ and osiPointReturnAddress_ of
+ // InvalidationBailoutStack are already on the stack.
+ static const uint32_t STACK_DATA_SIZE = sizeof(InvalidationBailoutStack) -
+ 2 * sizeof(uintptr_t);
+
+    // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Make room for data on stack.
+ masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);
+
+ // Save general purpose registers
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ Address address = Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
+ i * sizeof(uintptr_t));
+ masm.storePtr(Register::FromCode(i), address);
+ }
+
+ // Save floating point registers
+    // We can use as_sd because the stack is aligned.
+    for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++)
+ masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+ InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+    // Reserve space for the return value and the BailoutInfo pointer.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to return value.
+ masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a2);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.passABIArg(a2);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
+
+ masm.loadPtr(Address(StackPointer, 0), a2);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+ // Remove the return address, the IonScript, the register state
+    // (InvalidationBailoutStack) and the space that was allocated for the
+ // return value.
+ masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer);
+    // Remove the space that this frame was using before the bailout
+ // (computed by InvalidationBailout)
+ masm.addPtr(a1, StackPointer);
+
+    // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+
+ Linker linker(masm);
+ AutoFlushICache afc("Invalidator");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+ JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw());
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "Invalidator");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
+{
+ MacroAssembler masm(cx);
+ masm.pushReturnAddress();
+
+ // ArgumentsRectifierReg contains the |nargs| pushed onto the current
+ // frame. Including |this|, there are (|nargs| + 1) arguments to copy.
+ MOZ_ASSERT(ArgumentsRectifierReg == s3);
+
+ Register numActArgsReg = t6;
+ Register calleeTokenReg = t7;
+ Register numArgsReg = t5;
+
+ // Copy number of actual arguments into numActArgsReg
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
+ numActArgsReg);
+
+ // Load the number of |undefined|s to push into t1.
+ masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(CalleeTokenMask), numArgsReg);
+ masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg);
+
+ masm.as_subu(t1, numArgsReg, s3);
+
+ // Get the topmost argument.
+ masm.ma_sll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_addu(t2, sp, t0); // t2 <- sp + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t2);
+
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg, Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+        // Add sizeof(Value) to skip over |this|.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET + sizeof(Value)), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ // Include the newly pushed newTarget value in the frame size
+ // calculated below.
+ masm.add32(Imm32(1), numArgsReg);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Push undefined.
+ masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
+ masm.sub32(Imm32(1), t1);
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ {
+ Label copyLoopTop, initialSkip;
+
+ masm.ma_b(&initialSkip, ShortJump);
+
+ masm.bind(&copyLoopTop);
+ masm.subPtr(Imm32(sizeof(Value)), t2);
+ masm.sub32(Imm32(1), s3);
+
+ masm.bind(&initialSkip);
+
+ MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
+ // Read argument and push to stack.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+ masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
+ masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+    // Translate the frame size from values into bytes.
+ masm.ma_addu(t0, numArgsReg, Imm32(1));
+ masm.lshiftPtr(Imm32(3), t0);
+
+ // Construct sizeDescriptor.
+ masm.makeFrameDescriptor(t0, JitFrame_Rectifier, JitFrameLayout::Size());
+
+ // Construct JitFrameLayout.
+ masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+ // Push actual arguments.
+ masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+ // Push callee token.
+ masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+ // Push frame descriptor.
+ masm.storePtr(t0, Address(StackPointer, 0));
+
+ // Call the target function.
+ // Note that this code assumes the function is JITted.
+ masm.andPtr(Imm32(CalleeTokenMask), calleeTokenReg);
+ masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1);
+ masm.loadBaselineOrIonRaw(t1, t1, nullptr);
+ uint32_t returnOffset = masm.callJitNoProfiler(t1);
+
+ // arg1
+ // ...
+ // argN
+ // num actual args
+ // callee token
+ // sizeDescriptor <- sp now
+ // return address
+
+ // Remove the rectifier frame.
+ // t0 <- descriptor with FrameType.
+ masm.loadPtr(Address(StackPointer, 0), t0);
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t0); // t0 <- descriptor.
+
+ // Discard descriptor, calleeToken and number of actual arguments.
+ masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+
+ // arg1
+ // ...
+ // argN <- sp now; t0 <- frame descriptor
+ // num actual args
+ // callee token
+ // sizeDescriptor
+ // return address
+
+ // Discard pushed arguments.
+ masm.addPtr(t0, StackPointer);
+
+ masm.ret();
+ Linker linker(masm);
+ AutoFlushICache afc("ArgumentsRectifier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+ if (returnAddrOut)
+ *returnAddrOut = (void*) (code->raw() + returnOffset);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier");
+#endif
+
+ return code;
+}
+
+// NOTE: Members snapshotOffset_ and padding_ of BailoutStack
+// are not stored in PushBailoutFrame().
+static const uint32_t bailoutDataSize = sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
+static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);
+
+/* There are two different stack layouts when doing a bailout. They are
+ * represented via the class BailoutStack.
+ *
+ * - The first case is when the bailout is done through the bailout table. In
+ *   this case the table offset is stored in $ra (see
+ *   JitRuntime::generateBailoutTable()) and the thunk code should save it on
+ *   the stack. In this case frameClassId_ cannot be NO_FRAME_SIZE_CLASS_ID.
+ *   Members snapshotOffset_ and padding_ are not on the stack.
+ *
+ * - The other case is when the bailout is done via out-of-line code (lazy
+ *   bailout). In this case the frame size is stored in $ra (see
+ *   CodeGeneratorMIPS::generateOutOfLineCode()) and the thunk code should
+ *   save it on the stack. Another difference is that members snapshotOffset_
+ *   and padding_ are pushed onto the stack by
+ *   CodeGeneratorMIPS::visitOutOfLineBailout(). Field frameClassId_ is forced
+ *   to be NO_FRAME_SIZE_CLASS_ID (see JitRuntime::generateBailoutHandler).
+ */
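+//
+// Sketch (an illustrative restatement of the two cases, matching
+// GenerateBailoutThunk() below):
+//
+//   if (frameClass == NO_FRAME_SIZE_CLASS_ID)
+//       frameSize = <value spilled from $ra>;   // lazy bailout
+//   else
+//       frameSize = FrameSizeClass::FromClass(frameClass).frameSize();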
+static void
+PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
+{
+ // Make sure that alignment is proper.
+ masm.checkStackAlignment();
+
+ // Make room for data.
+ masm.subPtr(Imm32(bailoutDataSize), StackPointer);
+
+ // Save general purpose registers.
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
+ masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
+ }
+
+ // Save floating point registers
+    // We can use as_sd because the stack is aligned.
+ for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++)
+ masm.as_sd(FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+ BailoutStack::offsetOfFpRegs() + i * sizeof(double));
+
+    // Store the frameSize_ or tableOffset_ that was passed in ra.
+ // See: JitRuntime::generateBailoutTable()
+ // See: CodeGeneratorMIPS::generateOutOfLineCode()
+ masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));
+
+    // Put the frame class on the stack.
+ masm.storePtr(ImmWord(frameClass), Address(StackPointer, BailoutStack::offsetOfFrameClass()));
+
+    // Pass a pointer to the BailoutStack as the first argument to Bailout().
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void
+GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+{
+ PushBailoutFrame(masm, frameClass, a0);
+
+    // Make room for the BailoutInfo outparam and pass its address.
+ masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
+ masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
+ masm.movePtr(StackPointer, a1);
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+ // Load frameSize from stack
+ masm.loadPtr(Address(StackPointer,
+ bailoutInfoOutParamSize + BailoutStack::offsetOfFrameSize()), a1);
+
+        // Remove the complete BailoutStack and the data after it.
+ masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize), StackPointer);
+        // Remove the frame size from the stack.
+ masm.addPtr(a1, StackPointer);
+ } else {
+ uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+        // Remove the data this function added and the frame size.
+ masm.addPtr(Imm32(bailoutDataSize + bailoutInfoOutParamSize + frameSize), StackPointer);
+ }
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail();
+ masm.branch(bailoutTail);
+}
+
+JitCode*
+JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass)
+{
+ MacroAssembler masm(cx);
+
+ Label bailout;
+ for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
+        // Calculate the offset to the end of the table.
+ int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;
+
+        // We use 'ra' as the table offset later in GenerateBailoutThunk.
+ masm.as_bal(BOffImm16(offset));
+ masm.nop();
+ }
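+    // Each entry is a 'bal' (branch-and-link) to the end of the table, so on
+    // arrival $ra identifies which entry was taken; PushBailoutFrame() above
+    // spills that value as the table offset.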
+ masm.bind(&bailout);
+
+ GenerateBailoutThunk(cx, masm, frameClass);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTable");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTable");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+ GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutHandler");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutHandler");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
+{
+ MOZ_ASSERT(functionWrappers_);
+ MOZ_ASSERT(functionWrappers_->initialized());
+ VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f);
+ if (p)
+ return p->value();
+
+ MacroAssembler masm(cx);
+
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall)
+ masm.pushReturnAddress();
+
+ // We're aligned to an exit frame, so link it up.
+ masm.enterExitFrame(&f);
+ masm.loadJSContext(cxreg);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_addu(argsBase, StackPointer, Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ masm.alignStackPointer();
+
+    // Reserve space for the outparameter; any padding needed to keep the
+    // stack double-aligned is added below via outParamOffset.
+ uint32_t outParamSize = 0;
+ switch (f.outParam) {
+ case Type_Value:
+ outParamSize = sizeof(Value);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Handle:
+ {
+ uint32_t pushed = masm.framePushed();
+ masm.PushEmptyRooted(f.outParamRootType);
+ outParamSize = masm.framePushed() - pushed;
+ }
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ outParamSize = sizeof(uintptr_t);
+ masm.reserveStack(outParamSize);
+ break;
+
+ case Type_Double:
+ outParamSize = sizeof(double);
+ masm.reserveStack(outParamSize);
+ break;
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ uint32_t outParamOffset = 0;
+ if (f.outParam != Type_Void) {
+ // Make sure that stack is double aligned after outParam.
+ MOZ_ASSERT(outParamSize <= sizeof(double));
+ outParamOffset += sizeof(double) - outParamSize;
+ }
+    // Reserve stack for double-sized args that are copied to an aligned slot.
+ outParamOffset += f.doubleByRefArgs() * sizeof(double);
+
+ Register doubleArgs = t0;
+ masm.reserveStack(outParamOffset);
+ masm.movePtr(StackPointer, doubleArgs);
+
+ if (!generateTLEnterVM(cx, masm, f))
+ return nullptr;
+
+ masm.setupAlignedABICall();
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+ size_t doubleArgDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ MoveOperand from;
+ switch (f.argProperties(explicitArg)) {
+ case VMFunction::WordByValue:
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByValue:
+ // Values should be passed by reference, not by value, so we
+ // assert that the argument is a double-precision float.
+ MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ argDisp += sizeof(double);
+ break;
+ case VMFunction::WordByRef:
+ masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ argDisp += sizeof(uint32_t);
+ break;
+ case VMFunction::DoubleByRef:
+ // Copy double sized argument to aligned place.
+ masm.ma_ld(ScratchDoubleReg, Address(argsBase, argDisp));
+ masm.as_sd(ScratchDoubleReg, doubleArgs, doubleArgDisp);
+ masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ doubleArgDisp += sizeof(double);
+ argDisp += sizeof(double);
+ break;
+ }
+ }
+
+ MOZ_ASSERT_IF(f.outParam != Type_Void,
+ doubleArgDisp + sizeof(double) == outParamOffset + outParamSize);
+
+ // Copy the implicit outparam, if any.
+ if (f.outParam != Type_Void) {
+ masm.passABIArg(MoveOperand(doubleArgs, outParamOffset, MoveOperand::EFFECTIVE_ADDRESS),
+ MoveOp::GENERAL);
+ }
+
+ masm.callWithABI(f.wrapped);
+
+ if (!generateTLExitVM(cx, masm, f))
+ return nullptr;
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Object:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ masm.freeStack(outParamOffset);
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+ case Type_Pointer:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Double:
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ masm.as_ld(ReturnDoubleReg, StackPointer, 0);
+ } else {
+ masm.assumeUnreachable("Unable to load into float reg, with no FP support.");
+ }
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.restoreStackPointer();
+
+ masm.leaveExitFrame();
+ masm.retn(Imm32(sizeof(ExitFrameLayout) +
+ f.explicitStackSlots() * sizeof(uintptr_t) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ Linker linker(masm);
+ AutoFlushICache afc("VMWrapper");
+ JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE);
+ if (!wrapper)
+ return nullptr;
+
+ // linker.newCode may trigger a GC and sweep functionWrappers_ so we have
+ // to use relookupOrAdd instead of add.
+ if (!functionWrappers_->relookupOrAdd(p, &f, wrapper))
+ return nullptr;
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(wrapper, "VMWrapper");
+#endif
+
+ return wrapper;
+}
+
+JitCode*
+JitRuntime::generatePreBarrier(JSContext* cx, MIRType type)
+{
+ MacroAssembler masm(cx);
+
+ LiveRegisterSet save;
+ if (cx->runtime()->jitSupportsFloatingPoint) {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ } else {
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet());
+ }
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(IonMarkFunction(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("PreBarrier");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "PreBarrier");
+#endif
+
+ return code;
+}
+
+typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*);
+static const VMFunction HandleDebugTrapInfo =
+ FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap");
+
+JitCode*
+JitRuntime::generateDebugTrapHandler(JSContext* cx)
+{
+ MacroAssembler masm(cx);
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+
+ // Load BaselineFrame pointer in scratch1.
+ masm.movePtr(s5, scratch1);
+ masm.subPtr(Imm32(BaselineFrame::Size()), scratch1);
+
+ // Enter a stub frame and call the HandleDebugTrap VM function. Ensure
+ // the stub frame has a nullptr ICStub pointer, since this pointer is
+ // marked during GC.
+ masm.movePtr(ImmPtr(nullptr), ICStubReg);
+ EmitBaselineEnterStubFrame(masm, scratch2);
+
+ JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo);
+ if (!code)
+ return nullptr;
+
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t)));
+ masm.storePtr(scratch1, Address(StackPointer, 0));
+
+ EmitBaselineCallVM(code, masm);
+
+ EmitBaselineLeaveStubFrame(masm);
+
+ // If the stub returns |true|, we have to perform a forced return
+ // (return from the JS frame). If the stub returns |false|, just return
+ // from the trap stub so that execution continues at the current pc.
+ Label forcedReturn;
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn);
+
+    // ra was restored by EmitBaselineLeaveStubFrame.
+ masm.branch(ra);
+
+ masm.bind(&forcedReturn);
+ masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ masm.movePtr(s5, StackPointer);
+ masm.pop(s5);
+
+ // Before returning, if profiling is turned on, make sure that lastProfilingFrame
+ // is set to the correct caller frame.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation);
+ masm.profilerExitFrame();
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.ret();
+
+ Linker linker(masm);
+ AutoFlushICache afc("DebugTrapHandler");
+ JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler");
+#endif
+
+ return codeDbg;
+}
+
+
+JitCode*
+JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler)
+{
+ MacroAssembler masm;
+
+ masm.handleFailureWithHandlerTail(handler);
+
+ Linker linker(masm);
+ AutoFlushICache afc("ExceptionTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ExceptionTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateBailoutTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ masm.generateBailoutTail(a1, a2);
+
+ Linker linker(masm);
+ AutoFlushICache afc("BailoutTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "BailoutTailStub");
+#endif
+
+ return code;
+}
+
+JitCode*
+JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
+{
+ MacroAssembler masm;
+
+ Register scratch1 = t0;
+ Register scratch2 = t1;
+ Register scratch3 = t2;
+ Register scratch4 = t3;
+
+ //
+ // The code generated below expects that the current stack pointer points
+ // to an Ion or Baseline frame, at the state it would be immediately
+ // before a ret(). Thus, after this stub's business is done, it executes
+ // a ret() and returns directly to the caller script, on behalf of the
+ // callee script that jumped to this code.
+ //
+ // Thus the expected stack is:
+ //
+ // StackPointer ----+
+ // v
+ // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+ // MEM-HI MEM-LOW
+ //
+ //
+ // The generated jitcode is responsible for overwriting the
+ // jitActivation->lastProfilingFrame field with a pointer to the previous
+ // Ion or Baseline jit-frame that was pushed before this one. It is also
+ // responsible for overwriting jitActivation->lastProfilingCallSite with
+ // the return address into that frame. The frame could either be an
+ // immediate "caller" frame, or it could be a frame in a previous
+ // JitActivation (if the current frame was entered from C++, and the C++
+ // was entered by some caller jit-frame further down the stack).
+ //
+ // So this jitcode is responsible for "walking up" the jit stack, finding
+ // the previous Ion or Baseline JS frame, and storing its address and the
+ // return address into the appropriate fields on the current jitActivation.
+ //
+ // There are a fixed number of different path types that can lead to the
+ // current frame, which is either a baseline or ion frame:
+ //
+ // <Baseline-Or-Ion>
+ // ^
+ // |
+ // ^--- Ion
+ // |
+ // ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Argument Rectifier
+ // | ^
+ // | |
+ // | ^--- Ion
+ // | |
+ // | ^--- Baseline Stub <---- Baseline
+ // |
+ // ^--- Entry Frame (From C++)
+ //
+ Register actReg = scratch4;
+ AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation());
+ masm.loadPtr(activationAddr, actReg);
+
+ Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame());
+ Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+    // Ensure that the frame we are exiting is the current lastProfilingFrame.
+ {
+ masm.loadPtr(lastProfilingFrame, scratch1);
+ Label checkOk;
+ masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+ masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+ masm.assumeUnreachable(
+ "Mismatch between stored lastProfilingFrame and current stack pointer.");
+ masm.bind(&checkOk);
+ }
+#endif
+
+ // Load the frame descriptor into |scratch1|, figure out what to do depending on its type.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1);
+
+ // Going into the conditionals, we will have:
+ // FrameDescriptor.size in scratch1
+ // FrameDescriptor.type in scratch2
+ masm.ma_and(scratch2, scratch1, Imm32((1 << FRAMETYPE_BITS) - 1));
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
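+    // (A frame descriptor packs (size << FRAMESIZE_SHIFT) | type; the mask and
+    // shift above undo that packing.)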
+
+ // Handling of each case is dependent on FrameDescriptor.type
+ Label handle_IonJS;
+ Label handle_BaselineStub;
+ Label handle_Rectifier;
+ Label handle_IonAccessorIC;
+ Label handle_Entry;
+ Label end;
+
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC);
+ masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry);
+
+ masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame.");
+
+ //
+ // JitFrame_IonJS
+ //
+ // Stack layout:
+ // ...
+ // Ion-Descriptor
+ // Prev-FP ---> Ion-ReturnAddr
+ // ... previous frame data ... |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_IonJS);
+ {
+ // |scratch1| contains Descriptor.size
+
+        // Returning directly to an IonJS frame. Store the return address to
+        // the frame in lastProfilingCallSite.
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ // Store return frame in lastProfilingFrame.
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.ma_addu(scratch2, scratch2, Imm32(JitFrameLayout::Size()));
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_BaselineStub
+ //
+ // Look past the stub and store the frame pointer to
+ // the baselineJS frame prior to it.
+ //
+ // Stack layout:
+ // ...
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-PrevFramePointer
+ // | ... BL-FrameData ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Descriptor.Size
+ // ... arguments ... |
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ // We take advantage of the fact that the stub frame saves the frame
+ // pointer pointing to the baseline frame, so a bunch of calculation can
+ // be avoided.
+ //
+ masm.bind(&handle_BaselineStub);
+ {
+ masm.as_addu(scratch3, StackPointer, scratch1);
+ Address stubFrameReturnAddr(scratch3,
+ JitFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ JitFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+
+ //
+ // JitFrame_Rectifier
+ //
+ // The rectifier frame can be preceded by either an IonJS or a
+ // BaselineStub frame.
+ //
+ // Stack layout if caller of rectifier was Ion:
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- Rect-Descriptor.Size
+ // < COMMON LAYOUT >
+ //
+ // Stack layout if caller of rectifier was Baseline:
+ //
+ // BL-Descriptor
+ // Prev-FP ---> BL-ReturnAddr
+ // +-----> BL-SavedFramePointer
+ // | ... baseline frame data ...
+ // | BLStub-Descriptor
+ // | BLStub-ReturnAddr
+ // | BLStub-StubPointer |
+ // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size
+ // ... args to rectifier ... |
+ // < COMMON LAYOUT >
+ //
+ // Common stack layout:
+ //
+ // ActualArgc |
+    //        CalleeToken         |- RectifierFrameLayout::Size()
+ // Rect-Descriptor |
+ // Rect-ReturnAddr |
+ // ... rectifier data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ //
+ masm.bind(&handle_Rectifier);
+ {
+ // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3);
+ masm.ma_srl(scratch1, scratch3, Imm32(FRAMESIZE_SHIFT));
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+ // Now |scratch1| contains Rect-Descriptor.Size
+ // and |scratch2| points to Rectifier frame
+ // and |scratch3| contains Rect-Descriptor.Type
+
+ // Check for either Ion or BaselineStub frame.
+ Label handle_Rectifier_BaselineStub;
+ masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS),
+ &handle_Rectifier_BaselineStub);
+
+ // Handle Rectifier <- IonJS
+ // scratch3 := RectFrame[ReturnAddr]
+ masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3);
+ masm.storePtr(scratch3, lastProfilingCallSite);
+
+ // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size()
+ masm.as_addu(scratch3, scratch2, scratch1);
+ masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
+ masm.storePtr(scratch3, lastProfilingFrame);
+ masm.ret();
+
+ // Handle Rectifier <- BaselineStub <- BaselineJS
+ masm.bind(&handle_Rectifier_BaselineStub);
+#ifdef DEBUG
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk);
+ masm.assumeUnreachable("Unrecognized frame preceding baselineStub.");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.as_addu(scratch3, scratch2, scratch1);
+ Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() +
+ BaselineStubFrameLayout::offsetOfReturnAddress());
+ masm.loadPtr(stubFrameReturnAddr, scratch2);
+ masm.storePtr(scratch2, lastProfilingCallSite);
+
+ Address stubFrameSavedFramePtr(scratch3,
+ RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+ masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+ masm.addPtr(Imm32(sizeof(void*)), scratch2);
+ masm.storePtr(scratch2, lastProfilingFrame);
+ masm.ret();
+ }
+
+ // JitFrame_IonAccessorIC
+ //
+ // The caller is always an IonJS frame.
+ //
+ // Ion-Descriptor
+ // Ion-ReturnAddr
+ // ... ion frame data ... |- AccFrame-Descriptor.Size
+ // StubCode |
+ // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size()
+ // AccFrame-ReturnAddr |
+ // ... accessor frame data & args ... |- Descriptor.Size
+ // ActualArgc |
+ // CalleeToken |- JitFrameLayout::Size()
+ // Descriptor |
+ // FP -----> ReturnAddr |
+ masm.bind(&handle_IonAccessorIC);
+ {
+ // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+ masm.as_addu(scratch2, StackPointer, scratch1);
+ masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+ // scratch3 := AccFrame-Descriptor.Size
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3);
+#ifdef DEBUG
+ // Assert previous frame is an IonJS frame.
+ masm.movePtr(scratch3, scratch1);
+ masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+ {
+ Label checkOk;
+ masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk);
+ masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame");
+ masm.bind(&checkOk);
+ }
+#endif
+ masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+ // lastProfilingCallSite := AccFrame-ReturnAddr
+ masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+
+ // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size +
+ // IonAccessorICFrameLayout::Size()
+ masm.as_addu(scratch1, scratch2, scratch3);
+ masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ //
+ // JitFrame_Entry
+ //
+ // If at an entry frame, store null into both fields.
+ //
+ masm.bind(&handle_Entry);
+ {
+ masm.movePtr(ImmPtr(nullptr), scratch1);
+ masm.storePtr(scratch1, lastProfilingCallSite);
+ masm.storePtr(scratch1, lastProfilingFrame);
+ masm.ret();
+ }
+
+ Linker linker(masm);
+ AutoFlushICache afc("ProfilerExitFrameTailStub");
+ JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
+
+#ifdef JS_ION_PERF
+ writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub");
+#endif
+
+ return code;
+}