author     Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>    2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/jit/mips64
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/jit/mips64')
25 files changed, 12913 insertions, 0 deletions
diff --git a/js/src/jit/mips64/Architecture-mips64.cpp b/js/src/jit/mips64/Architecture-mips64.cpp
new file mode 100644
index 000000000..d7b0a55a5
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.cpp
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Architecture-mips64.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char * const Registers::RegNames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+                                             "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+                                             "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+                                             "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" };
+
+const uint32_t Allocatable = 22;
+
+const Registers::SetType Registers::ArgRegMask =
+    Registers::SharedArgRegMask |
+    (1 << a4) | (1 << a5) | (1 << a6) | (1 << a7);
+
+const Registers::SetType Registers::JSCallMask =
+    (1 << Registers::v1);
+
+const Registers::SetType Registers::CallMask =
+    (1 << Registers::v0);
+
+FloatRegisters::Encoding
+FloatRegisters::FromName(const char* name)
+{
+    for (size_t i = 0; i < Total; i++) {
+        if (strcmp(GetName(Encoding(i)), name) == 0)
+            return Encoding(i);
+    }
+
+    return Invalid;
+}
+
+FloatRegister
+FloatRegister::singleOverlay() const
+{
+    MOZ_ASSERT(!isInvalid());
+    if (kind_ == Codes::Double)
+        return FloatRegister(reg_, Codes::Single);
+    return *this;
+}
+
+FloatRegister
+FloatRegister::doubleOverlay() const
+{
+    MOZ_ASSERT(!isInvalid());
+    if (kind_ != Codes::Double)
+        return FloatRegister(reg_, Codes::Double);
+    return *this;
+}
+
+FloatRegisterSet
+FloatRegister::ReduceSetForPush(const FloatRegisterSet& s)
+{
+    LiveFloatRegisterSet mod;
+    for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+        if ((*iter).isSingle()) {
+            // Even for single-size registers, save the complete double register.
+            mod.addUnchecked((*iter).doubleOverlay());
+        } else {
+            mod.addUnchecked(*iter);
+        }
+    }
+    return mod.set();
+}
+
+uint32_t
+FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s)
+{
+    FloatRegisterSet ss = s.reduceSetForPush();
+    uint64_t bits = ss.bits();
+    // We are only pushing double registers.
+    MOZ_ASSERT((bits & 0xffffffff) == 0);
+    uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+    return ret;
+}
+
+uint32_t
+FloatRegister::getRegisterDumpOffsetInBytes()
+{
+    return id() * sizeof(double);
+}
+
+} // namespace jit
+} // namespace js
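GetPushSizeInBytes above counts only the double half of the register mask, because ReduceSetForPush has already widened every single register to its double overlay. A minimal standalone sketch of that arithmetic (helper names hypothetical; the bit layout follows the Spread constants declared in Architecture-mips64.h, next file):

#include <bit>
#include <cassert>
#include <cstdint>

// Singles occupy bits [0, 32) of the set, their double overlays bits [32, 64).
static uint32_t PushSizeInBytes(uint64_t bits)
{
    // Promote every single register to its double overlay, then count doubles.
    uint64_t doubles = (bits >> 32) | (bits & 0xffffffffu);
    return std::popcount(doubles) * sizeof(double);
}

int main()
{
    // f0 held as a single (bit 0) plus f2 held as a double (bit 34)
    // push two complete doubles.
    assert(PushSizeInBytes((1ull << 0) | (1ull << 34)) == 2 * sizeof(double));
}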
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
new file mode 100644
index 000000000..dde783442
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Architecture_mips64_h
+#define jit_mips64_Architecture_mips64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// Shadow stack space is not required on MIPS64.
+static const uint32_t ShadowStackSpace = 0;
+
+// MIPS64 has a 64-bit floating-point coprocessor. There are 32 double-precision
+// registers, which can also be used as single-precision registers.
+class FloatRegisters : public FloatRegistersMIPSShared
+{
+  public:
+    enum ContentType {
+        Single,
+        Double,
+        NumTypes
+    };
+
+    static const char* GetName(uint32_t i) {
+        MOZ_ASSERT(i < TotalPhys);
+        return FloatRegistersMIPSShared::GetName(Encoding(i));
+    }
+
+    static Encoding FromName(const char* name);
+
+    static const uint32_t Total = 32 * NumTypes;
+    static const uint32_t Allocatable = 60;
+    // When saving all registers, we only need to save the double registers.
+    static const uint32_t TotalPhys = 32;
+
+    static_assert(sizeof(SetType) * 8 >= Total,
+                  "SetType should be large enough to enumerate all registers.");
+
+    // Magic values which are used to duplicate a mask of physical registers
+    // for a specific type of register. A multiplication is used to copy and
+    // shift the bits of the physical register mask.
+    static const SetType SpreadSingle = SetType(1) << (uint32_t(Single) * TotalPhys);
+    static const SetType SpreadDouble = SetType(1) << (uint32_t(Double) * TotalPhys);
+    static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+    static const SetType SpreadVector = 0;
+    static const SetType Spread = SpreadScalar | SpreadVector;
+
+    static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+    static const SetType AllMask = AllPhysMask * Spread;
+    static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+    static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+
+    static const SetType NonVolatileMask =
+        ( (1U << FloatRegisters::f24) |
+          (1U << FloatRegisters::f25) |
+          (1U << FloatRegisters::f26) |
+          (1U << FloatRegisters::f27) |
+          (1U << FloatRegisters::f28) |
+          (1U << FloatRegisters::f29) |
+          (1U << FloatRegisters::f30) |
+          (1U << FloatRegisters::f31)
+        ) * SpreadScalar
+        | AllPhysMask * SpreadVector;
+
+    static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+    static const SetType WrapperMask = VolatileMask;
+
+    static const SetType NonAllocatableMask =
+        ( // f21 and f23 are MIPS scratch float registers.
+          (1U << FloatRegisters::f21) |
+          (1U << FloatRegisters::f23)
+        ) * Spread;
+
+    // Registers that can be allocated without being saved, generally.
+    static const SetType TempMask = VolatileMask & ~NonAllocatableMask;
+
+    static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegister : public FloatRegisterMIPSShared
+{
+  public:
+    typedef FloatRegisters Codes;
+    typedef size_t Code;
+    typedef Codes::Encoding Encoding;
+    typedef Codes::ContentType ContentType;
+
+    Encoding reg_ : 6;
+  private:
+    ContentType kind_ : 3;
+
+  public:
+    constexpr FloatRegister(uint32_t r, ContentType kind = Codes::Double)
+      : reg_(Encoding(r)), kind_(kind)
+    { }
+    constexpr FloatRegister()
+      : reg_(Encoding(FloatRegisters::invalid_freg)), kind_(Codes::Double)
+    { }
+
+    bool operator==(const FloatRegister& other) const {
+        MOZ_ASSERT(!isInvalid());
+        MOZ_ASSERT(!other.isInvalid());
+        return kind_ == other.kind_ && reg_ == other.reg_;
+    }
+    bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+    size_t size() const { return (kind_ == Codes::Double) ?
sizeof(double) : sizeof (float); } + bool isInvalid() const { + return reg_ == FloatRegisters::invalid_freg; + } + + bool isSingle() const { return kind_ == Codes::Single; } + bool isDouble() const { return kind_ == Codes::Double; } + + FloatRegister singleOverlay() const; + FloatRegister doubleOverlay() const; + + FloatRegister asSingle() const { return singleOverlay(); } + FloatRegister asDouble() const { return doubleOverlay(); } + FloatRegister asSimd128() const { MOZ_CRASH("NYI"); } + + Code code() const { + MOZ_ASSERT(!isInvalid()); + return Code(reg_ | (kind_ << 5)); + } + Encoding encoding() const { + MOZ_ASSERT(!isInvalid()); + MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys); + return reg_; + } + uint32_t id() const { + return reg_; + } + static FloatRegister FromCode(uint32_t i) { + uint32_t code = i & 0x1f; + uint32_t kind = i >> 5; + return FloatRegister(Code(code), ContentType(kind)); + } + + bool volatile_() const { + return !!((1 << reg_) & FloatRegisters::VolatileMask); + } + const char* name() const { + return FloatRegisters::GetName(reg_); + } + bool operator != (const FloatRegister& other) const { + return kind_ != other.kind_ || reg_ != other.reg_; + } + bool aliases(const FloatRegister& other) { + return reg_ == other.reg_; + } + uint32_t numAliased() const { + return 2; + } + void aliased(uint32_t aliasIdx, FloatRegister* ret) { + if (aliasIdx == 0) { + *ret = *this; + return; + } + MOZ_ASSERT(aliasIdx == 1); + if (isDouble()) + *ret = singleOverlay(); + else + *ret = doubleOverlay(); + } + uint32_t numAlignedAliased() const { + return 2; + } + void alignedAliased(uint32_t aliasIdx, FloatRegister* ret) { + MOZ_ASSERT(isDouble()); + if (aliasIdx == 0) { + *ret = *this; + return; + } + MOZ_ASSERT(aliasIdx == 1); + *ret = singleOverlay(); + } + + SetType alignedOrDominatedAliasedSet() const { + return Codes::Spread << reg_; + } + + static Code FromName(const char* name) { + return FloatRegisters::FromName(name); + } + static TypedRegisterSet<FloatRegister> ReduceSetForPush(const TypedRegisterSet<FloatRegister>& s); + static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s); + uint32_t getRegisterDumpOffsetInBytes(); +}; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_Architecture_mips64_h */ diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp new file mode 100644 index 000000000..4d251f152 --- /dev/null +++ b/js/src/jit/mips64/Assembler-mips64.cpp @@ -0,0 +1,529 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/mips64/Assembler-mips64.h" + +#include "mozilla/DebugOnly.h" + +using mozilla::DebugOnly; + +using namespace js; +using namespace js::jit; + +ABIArgGenerator::ABIArgGenerator() + : usedArgSlots_(0), + firstArgFloat(false), + current_() +{} + +ABIArg +ABIArgGenerator::next(MIRType type) +{ + switch (type) { + case MIRType::Int32: + case MIRType::Int64: + case MIRType::Pointer: { + Register destReg; + if (GetIntArgReg(usedArgSlots_, &destReg)) + current_ = ABIArg(destReg); + else + current_ = ABIArg(GetArgStackDisp(usedArgSlots_)); + usedArgSlots_++; + break; + } + case MIRType::Float32: + case MIRType::Double: { + FloatRegister destFReg; + FloatRegister::ContentType contentType; + if (!usedArgSlots_) + firstArgFloat = true; + contentType = (type == MIRType::Double) ? + FloatRegisters::Double : FloatRegisters::Single; + if (GetFloatArgReg(usedArgSlots_, &destFReg)) + current_ = ABIArg(FloatRegister(destFReg.id(), contentType)); + else + current_ = ABIArg(GetArgStackDisp(usedArgSlots_)); + usedArgSlots_++; + break; + } + default: + MOZ_CRASH("Unexpected argument type"); + } + return current_; +} + +uint32_t +js::jit::RT(FloatRegister r) +{ + MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys); + return r.id() << RTShift; +} + +uint32_t +js::jit::RD(FloatRegister r) +{ + MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys); + return r.id() << RDShift; +} + +uint32_t +js::jit::RZ(FloatRegister r) +{ + MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys); + return r.id() << RZShift; +} + +uint32_t +js::jit::SA(FloatRegister r) +{ + MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys); + return r.id() << SAShift; +} + +// Used to patch jumps created by MacroAssemblerMIPS64Compat::jumpWithPatch. +void +jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect) +{ + Instruction* inst = (Instruction*)jump_.raw(); + + // Six instructions used in load 64-bit imm. + MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect); + Assembler::UpdateLoad64Value(inst, (uint64_t)label.raw()); + + AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t)); +} + +// For more infromation about backedges look at comment in +// MacroAssemblerMIPS64Compat::backedgeJump() +void +jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label, + JitRuntime::BackedgeTarget target) +{ + uintptr_t sourceAddr = (uintptr_t)jump.raw(); + uintptr_t targetAddr = (uintptr_t)label.raw(); + InstImm* branch = (InstImm*)jump.raw(); + + MOZ_ASSERT(branch->extractOpcode() == (uint32_t(op_beq) >> OpcodeShift)); + + if (BOffImm16::IsInRange(targetAddr - sourceAddr)) { + branch->setBOffImm16(BOffImm16(targetAddr - sourceAddr)); + } else { + if (target == JitRuntime::BackedgeLoopHeader) { + Instruction* inst = &branch[1]; + Assembler::UpdateLoad64Value(inst, targetAddr); + // Jump to first ori. The lui will be executed in delay slot. + branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t))); + } else { + Instruction* inst = &branch[6]; + Assembler::UpdateLoad64Value(inst, targetAddr); + // Jump to first ori of interrupt loop. + branch->setBOffImm16(BOffImm16(6 * sizeof(uint32_t))); + } + } +} + +void +Assembler::executableCopy(uint8_t* buffer) +{ + MOZ_ASSERT(isFinished); + m_buffer.executableCopy(buffer); + + // Patch all long jumps during code copy. 
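The loop that follows rebases every recorded long-jump site once the final buffer address is known: each site embeds a buffer-relative target that must become absolute. A hedged standalone model of the same rebase, using memcpy stand-ins for ExtractLoad64Value/UpdateLoad64Value (the real pair splits the value across a lui/ori instruction sequence rather than storing it verbatim):

#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in: read the 64-bit immediate embedded at a patch site.
static uint64_t ReadImm64(const uint8_t* at)
{
    uint64_t v;
    std::memcpy(&v, at, sizeof(v));
    return v;
}

// Stand-in: rewrite the 64-bit immediate at a patch site.
static void WriteImm64(uint8_t* at, uint64_t v)
{
    std::memcpy(at, &v, sizeof(v));
}

static void RebaseLongJumps(uint8_t* buffer, const std::vector<size_t>& longJumps)
{
    for (size_t offset : longJumps) {
        uint8_t* inst = buffer + offset;
        // The embedded value was buffer-relative; make it absolute.
        WriteImm64(inst, uint64_t(reinterpret_cast<uintptr_t>(buffer)) + ReadImm64(inst));
    }
}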
+ for (size_t i = 0; i < longJumps_.length(); i++) { + Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]); + + uint64_t value = Assembler::ExtractLoad64Value(inst); + Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value); + } + + AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size()); +} + +uintptr_t +Assembler::GetPointer(uint8_t* instPtr) +{ + Instruction* inst = (Instruction*)instPtr; + return Assembler::ExtractLoad64Value(inst); +} + +static JitCode * +CodeFromJump(Instruction* jump) +{ + uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump); + return JitCode::FromExecutable(target); +} + +void +Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + while (reader.more()) { + JitCode* child = CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned())); + TraceManuallyBarrieredEdge(trc, &child, "rel32"); + } +} + +static void +TraceOneDataRelocation(JSTracer* trc, Instruction* inst) +{ + void* ptr = (void*)Assembler::ExtractLoad64Value(inst); + void* prior = ptr; + + // All pointers on MIPS64 will have the top bits cleared. If those bits + // are not cleared, this must be a Value. + uintptr_t word = reinterpret_cast<uintptr_t>(ptr); + if (word >> JSVAL_TAG_SHIFT) { + Value v = Value::fromRawBits(word); + TraceManuallyBarrieredEdge(trc, &v, "ion-masm-value"); + ptr = (void*)v.bitsAsPunboxPointer(); + } else { + // No barrier needed since these are constants. + TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr), + "ion-masm-ptr"); + } + + if (ptr != prior) { + Assembler::UpdateLoad64Value(inst, uint64_t(ptr)); + AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t)); + } +} + +static void +TraceDataRelocations(JSTracer* trc, uint8_t* buffer, CompactBufferReader& reader) +{ + while (reader.more()) { + size_t offset = reader.readUnsigned(); + Instruction* inst = (Instruction*)(buffer + offset); + TraceOneDataRelocation(trc, inst); + } +} + +static void +TraceDataRelocations(JSTracer* trc, MIPSBuffer* buffer, CompactBufferReader& reader) +{ + while (reader.more()) { + BufferOffset bo (reader.readUnsigned()); + MIPSBuffer::AssemblerBufferInstIterator iter(bo, buffer); + TraceOneDataRelocation(trc, iter.cur()); + } +} + +void +Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader) +{ + ::TraceDataRelocations(trc, code->raw(), reader); +} + +void +Assembler::trace(JSTracer* trc) +{ + for (size_t i = 0; i < jumps_.length(); i++) { + RelativePatch& rp = jumps_[i]; + if (rp.kind == Relocation::JITCODE) { + JitCode* code = JitCode::FromExecutable((uint8_t*)rp.target); + TraceManuallyBarrieredEdge(trc, &code, "masmrel32"); + MOZ_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target)); + } + } + if (dataRelocations_.length()) { + CompactBufferReader reader(dataRelocations_); + ::TraceDataRelocations(trc, &m_buffer, reader); + } +} + +void +Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address) +{ + if (label->bound()) { + intptr_t offset = label->offset(); + Instruction* inst = (Instruction*) (rawCode + offset); + Assembler::UpdateLoad64Value(inst, (uint64_t)address); + } +} + +void +Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) +{ + int64_t offset = target - branch; + InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)); + InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0)); + + // If encoded offset is 4, then the jump must be short + if (BOffImm16(inst[0]).decode() 
== 4) { + MOZ_ASSERT(BOffImm16::IsInRange(offset)); + inst[0].setBOffImm16(BOffImm16(offset)); + inst[1].makeNop(); + return; + } + + // Generate the long jump for calls because return address has to be the + // address after the reserved block. + if (inst[0].encode() == inst_bgezal.encode()) { + addLongJump(BufferOffset(branch)); + Assembler::WriteLoad64Instructions(inst, ScratchRegister, target); + inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode(); + // There is 1 nop after this. + return; + } + + if (BOffImm16::IsInRange(offset)) { + // Don't skip trailing nops can improve performance + // on Loongson3 platform. + bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() && + inst[0].encode() != inst_beq.encode()); + + inst[0].setBOffImm16(BOffImm16(offset)); + inst[1].makeNop(); + + if (skipNops) { + inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode(); + // There are 4 nops after this + } + return; + } + + if (inst[0].encode() == inst_beq.encode()) { + // Handle long unconditional jump. + addLongJump(BufferOffset(branch)); + Assembler::WriteLoad64Instructions(inst, ScratchRegister, target); + inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + // There is 1 nop after this. + } else { + // Handle long conditional jump. + inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t))); + // No need for a "nop" here because we can clobber scratch. + addLongJump(BufferOffset(branch + sizeof(uint32_t))); + Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target); + inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + // There is 1 nop after this. + } +} + +void +Assembler::bind(RepatchLabel* label) +{ + BufferOffset dest = nextOffset(); + if (label->used() && !oom()) { + // If the label has a use, then change this use to refer to + // the bound label; + BufferOffset b(label->offset()); + InstImm* inst = (InstImm*)editSrc(b); + InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0)); + uint64_t offset = dest.getOffset() - label->offset(); + + // If first instruction is lui, then this is a long jump. + // If second instruction is lui, then this is a loop backedge. + if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) { + // For unconditional long branches generated by ma_liPatchable, + // such as under: + // jumpWithpatch + Assembler::UpdateLoad64Value(inst, dest.getOffset()); + } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) || + BOffImm16::IsInRange(offset)) + { + // Handle code produced by: + // backedgeJump + // branchWithCode + MOZ_ASSERT(BOffImm16::IsInRange(offset)); + MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) || + inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) || + inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) || + inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift)); + inst[0].setBOffImm16(BOffImm16(offset)); + } else if (inst[0].encode() == inst_beq.encode()) { + // Handle open long unconditional jumps created by + // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...). + // We need to add it to long jumps array here. + // See MacroAssemblerMIPS64::branchWithCode(). 
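The short-versus-long choice running through bind() hinges on whether the byte offset fits the branch immediate. A sketch of that range test, assuming the classic MIPS I-type encoding in which a branch stores a signed 16-bit count of 4-byte instructions:

#include <cstdint>

// Reachable byte offsets span [-32768*4, 32767*4] and must be word-aligned;
// outside that range the binder inverts the condition and emits a
// six-instruction load of the target followed by jr.
static bool FitsShortBranch(int64_t byteOffset)
{
    return (byteOffset & 3) == 0 &&
           byteOffset >= -(int64_t(1) << 17) &&
           byteOffset <= (int64_t(1) << 17) - 4;
}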
+ MOZ_ASSERT(inst[1].encode() == NopInst); + MOZ_ASSERT(inst[2].encode() == NopInst); + MOZ_ASSERT(inst[3].encode() == NopInst); + MOZ_ASSERT(inst[4].encode() == NopInst); + MOZ_ASSERT(inst[5].encode() == NopInst); + addLongJump(BufferOffset(label->offset())); + Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset()); + inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + } else { + // Handle open long conditional jumps created by + // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...). + inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t))); + // No need for a "nop" here because we can clobber scratch. + // We need to add it to long jumps array here. + // See MacroAssemblerMIPS64::branchWithCode(). + MOZ_ASSERT(inst[1].encode() == NopInst); + MOZ_ASSERT(inst[2].encode() == NopInst); + MOZ_ASSERT(inst[3].encode() == NopInst); + MOZ_ASSERT(inst[4].encode() == NopInst); + MOZ_ASSERT(inst[5].encode() == NopInst); + MOZ_ASSERT(inst[6].encode() == NopInst); + addLongJump(BufferOffset(label->offset() + sizeof(uint32_t))); + Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset()); + inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode(); + } + } + label->bind(dest.getOffset()); +} + +uint32_t +Assembler::PatchWrite_NearCallSize() +{ + // Load an address needs 4 instructions, and a jump with a delay slot. + return (4 + 2) * sizeof(uint32_t); +} + +void +Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) +{ + Instruction* inst = (Instruction*) start.raw(); + uint8_t* dest = toCall.raw(); + + // Overwrite whatever instruction used to be here with a call. + // Always use long jump for two reasons: + // - Jump has to be the same size because of PatchWrite_NearCallSize. + // - Return address has to be at the end of replaced block. + // Short jump wouldn't be more efficient. + Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest); + inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr); + inst[5] = InstNOP(); + + // Ensure everyone sees the code that was just written into memory. 
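ExtractLoad64Value and UpdateLoad64Value below also handle a compact 48-bit form in which three 16-bit immediates cover bits 47:0 and the result is sign-extended from bit 47, matching the ((int64_t(value) << 16) >> 16) step in the extractor. A sketch of that split and reassembly (Imm48 is a hypothetical helper):

#include <cassert>
#include <cstdint>

struct Imm48 { uint16_t hi, mid, lo; };  // hypothetical three-immediate split

static Imm48 Split48(uint64_t v)
{
    return { uint16_t(v >> 32), uint16_t(v >> 16), uint16_t(v) };
}

static uint64_t Join48(Imm48 s)
{
    uint64_t v = (uint64_t(s.hi) << 32) | (uint64_t(s.mid) << 16) | s.lo;
    return uint64_t((int64_t(v) << 16) >> 16);  // sign-extend from 48 bits
}

int main()
{
    uint64_t addr = 0x00007f2a12345678;
    assert(Join48(Split48(addr)) == addr);
}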
+ AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize()); +} + +uint64_t +Assembler::ExtractLoad64Value(Instruction* inst0) +{ + InstImm* i0 = (InstImm*) inst0; + InstImm* i1 = (InstImm*) i0->next(); + InstReg* i2 = (InstReg*) i1->next(); + InstImm* i3 = (InstImm*) i2->next(); + InstImm* i5 = (InstImm*) i3->next()->next(); + + MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) && + (i2->extractFunctionField() == ff_dsrl32)) + { + uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) | + (uint64_t(i1->extractImm16Value()) << 16) | + uint64_t(i3->extractImm16Value()); + return uint64_t((int64_t(value) <<16) >> 16); + } + + MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) | + (uint64_t(i1->extractImm16Value()) << 32) | + (uint64_t(i3->extractImm16Value()) << 16) | + uint64_t(i5->extractImm16Value()); + return value; +} + +void +Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) +{ + InstImm* i0 = (InstImm*) inst0; + InstImm* i1 = (InstImm*) i0->next(); + InstReg* i2 = (InstReg*) i1->next(); + InstImm* i3 = (InstImm*) i2->next(); + InstImm* i5 = (InstImm*) i3->next()->next(); + + MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) && + (i2->extractFunctionField() == ff_dsrl32)) + { + i0->setImm16(Imm16::Lower(Imm32(value >> 32))); + i1->setImm16(Imm16::Upper(Imm32(value))); + i3->setImm16(Imm16::Lower(Imm32(value))); + return; + } + + MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + i0->setImm16(Imm16::Upper(Imm32(value >> 32))); + i1->setImm16(Imm16::Lower(Imm32(value >> 32))); + i3->setImm16(Imm16::Upper(Imm32(value))); + i5->setImm16(Imm16::Lower(Imm32(value))); +} + +void +Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value) +{ + Instruction* inst1 = inst0->next(); + Instruction* inst2 = inst1->next(); + Instruction* inst3 = inst2->next(); + + *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32))); + *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value))); + *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32); + *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value))); +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, + ImmPtr expectedValue) +{ + PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value), + PatchedImmPtr(expectedValue.value)); +} + +void +Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expectedValue) +{ + Instruction* inst = (Instruction*) label.raw(); + + // Extract old Value + DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst); + MOZ_ASSERT(value == uint64_t(expectedValue.value)); + + // Replace with new value + Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value)); + + AutoFlushICache::flush(uintptr_t(inst), 6 * sizeof(uint32_t)); +} + +void +Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm) +{ + InstImm* inst = (InstImm*)code; + Assembler::UpdateLoad64Value(inst, 
(uint64_t)imm.value); +} + +uint64_t +Assembler::ExtractInstructionImmediate(uint8_t* code) +{ + InstImm* inst = (InstImm*)code; + return Assembler::ExtractLoad64Value(inst); +} + +void +Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) +{ + Instruction* inst = (Instruction*)inst_.raw(); + InstImm* i0 = (InstImm*) inst; + InstImm* i1 = (InstImm*) i0->next(); + InstImm* i3 = (InstImm*) i1->next()->next(); + Instruction* i4 = (Instruction*) i3->next(); + + MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift)); + MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift)); + + if (enabled) { + MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift)); + InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr); + *i4 = jalr; + } else { + InstNOP nop; + *i4 = nop; + } + + AutoFlushICache::flush(uintptr_t(i4), sizeof(uint32_t)); +} diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h new file mode 100644 index 000000000..8a71c57bb --- /dev/null +++ b/js/src/jit/mips64/Assembler-mips64.h @@ -0,0 +1,236 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_Assembler_mips64_h +#define jit_mips64_Assembler_mips64_h + +#include "jit/mips-shared/Assembler-mips-shared.h" + +#include "jit/mips64/Architecture-mips64.h" + +namespace js { +namespace jit { + +static constexpr Register CallTempReg4 = a4; +static constexpr Register CallTempReg5 = a5; + +static constexpr Register CallTempNonArgRegs[] = { t0, t1, t2, t3 }; +static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs); + +class ABIArgGenerator +{ + unsigned usedArgSlots_; + bool firstArgFloat; + ABIArg current_; + + public: + ABIArgGenerator(); + ABIArg next(MIRType argType); + ABIArg& current() { return current_; } + + uint32_t stackBytesConsumedSoFar() const { + if (usedArgSlots_ <= 8) + return 0; + + return (usedArgSlots_ - 8) * sizeof(int64_t); + } +}; + +static constexpr Register ABINonArgReg0 = t0; +static constexpr Register ABINonArgReg1 = t1; +static constexpr Register ABINonArgReg2 = t2; +static constexpr Register ABINonArgReturnReg0 = t0; +static constexpr Register ABINonArgReturnReg1 = t1; + +// TLS pointer argument register for WebAssembly functions. This must not alias +// any other register used for passing function arguments or return values. +// Preserved by WebAssembly functions. +static constexpr Register WasmTlsReg = s5; + +// Registers used for wasm table calls. These registers must be disjoint +// from the ABI argument registers, WasmTlsReg and each other. 
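ABIArgGenerator above follows the N64 convention: integer and floating arguments share a single slot counter, the first eight slots map to a0..a7 or f12..f19, and every later slot consumes eight bytes of stack (matching stackBytesConsumedSoFar and the GetIntArgReg/GetFloatArgReg helpers later in this header). A hedged model:

#include <cstdio>

enum class ArgKind { Int, Float };

static void AssignArgs(const ArgKind* args, unsigned n)
{
    unsigned usedSlots = 0;
    for (unsigned i = 0; i < n; i++, usedSlots++) {
        if (usedSlots >= 8)
            std::printf("arg %u -> sp+%u\n", i, (usedSlots - 8) * 8);
        else if (args[i] == ArgKind::Int)
            std::printf("arg %u -> $a%u\n", i, usedSlots);
        else
            std::printf("arg %u -> $f%u\n", i, 12 + usedSlots);
    }
}

int main()
{
    const ArgKind sig[] = { ArgKind::Int, ArgKind::Float, ArgKind::Int };
    AssignArgs(sig, 3);  // a0, f13, a2: the float burns slot 1 for everyone
}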
+static constexpr Register WasmTableCallScratchReg = ABINonArgReg0; +static constexpr Register WasmTableCallSigReg = ABINonArgReg1; +static constexpr Register WasmTableCallIndexReg = ABINonArgReg2; + +static constexpr Register JSReturnReg = v1; +static constexpr Register JSReturnReg_Type = JSReturnReg; +static constexpr Register JSReturnReg_Data = JSReturnReg; +static constexpr Register64 ReturnReg64(ReturnReg); +static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::f0, FloatRegisters::Single }; +static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegisters::Double }; +static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f23, FloatRegisters::Single }; +static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f23, FloatRegisters::Double }; +static constexpr FloatRegister SecondScratchFloat32Reg = { FloatRegisters::f21, FloatRegisters::Single }; +static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, FloatRegisters::Double }; + +// Registers used in the GenerateFFIIonExit Disable Activation block. +// None of these may be the second scratch register (t8). +static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data; +static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type; + +static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegisters::Double }; +static constexpr FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Double }; +static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegisters::Double }; +static constexpr FloatRegister f3 = { FloatRegisters::f3, FloatRegisters::Double }; +static constexpr FloatRegister f4 = { FloatRegisters::f4, FloatRegisters::Double }; +static constexpr FloatRegister f5 = { FloatRegisters::f5, FloatRegisters::Double }; +static constexpr FloatRegister f6 = { FloatRegisters::f6, FloatRegisters::Double }; +static constexpr FloatRegister f7 = { FloatRegisters::f7, FloatRegisters::Double }; +static constexpr FloatRegister f8 = { FloatRegisters::f8, FloatRegisters::Double }; +static constexpr FloatRegister f9 = { FloatRegisters::f9, FloatRegisters::Double }; +static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegisters::Double }; +static constexpr FloatRegister f11 = { FloatRegisters::f11, FloatRegisters::Double }; +static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegisters::Double }; +static constexpr FloatRegister f13 = { FloatRegisters::f13, FloatRegisters::Double }; +static constexpr FloatRegister f14 = { FloatRegisters::f14, FloatRegisters::Double }; +static constexpr FloatRegister f15 = { FloatRegisters::f15, FloatRegisters::Double }; +static constexpr FloatRegister f16 = { FloatRegisters::f16, FloatRegisters::Double }; +static constexpr FloatRegister f17 = { FloatRegisters::f17, FloatRegisters::Double }; +static constexpr FloatRegister f18 = { FloatRegisters::f18, FloatRegisters::Double }; +static constexpr FloatRegister f19 = { FloatRegisters::f19, FloatRegisters::Double }; +static constexpr FloatRegister f20 = { FloatRegisters::f20, FloatRegisters::Double }; +static constexpr FloatRegister f21 = { FloatRegisters::f21, FloatRegisters::Double }; +static constexpr FloatRegister f22 = { FloatRegisters::f22, FloatRegisters::Double }; +static constexpr FloatRegister f23 = { FloatRegisters::f23, FloatRegisters::Double }; +static constexpr FloatRegister f24 = { FloatRegisters::f24, FloatRegisters::Double }; +static constexpr FloatRegister f25 = { FloatRegisters::f25, FloatRegisters::Double }; +static 
constexpr FloatRegister f26 = { FloatRegisters::f26, FloatRegisters::Double }; +static constexpr FloatRegister f27 = { FloatRegisters::f27, FloatRegisters::Double }; +static constexpr FloatRegister f28 = { FloatRegisters::f28, FloatRegisters::Double }; +static constexpr FloatRegister f29 = { FloatRegisters::f29, FloatRegisters::Double }; +static constexpr FloatRegister f30 = { FloatRegisters::f30, FloatRegisters::Double }; +static constexpr FloatRegister f31 = { FloatRegisters::f31, FloatRegisters::Double }; + +// MIPS64 CPUs can only load multibyte data that is "naturally" +// eight-byte-aligned, sp register should be sixteen-byte-aligned. +static constexpr uint32_t ABIStackAlignment = 16; +static constexpr uint32_t JitStackAlignment = 16; + +static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value); +static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1, + "Stack alignment should be a non-zero multiple of sizeof(Value)"); + +// TODO this is just a filler to prevent a build failure. The MIPS SIMD +// alignment requirements still need to be explored. +// TODO Copy the static_asserts from x64/x86 assembler files. +static constexpr uint32_t SimdMemoryAlignment = 16; + +static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment; + +// Does this architecture support SIMD conversions between Uint32x4 and Float32x4? +static constexpr bool SupportsUint32x4FloatConversions = false; + +// Does this architecture support comparisons of unsigned integer vectors? +static constexpr bool SupportsUint8x16Compares = false; +static constexpr bool SupportsUint16x8Compares = false; +static constexpr bool SupportsUint32x4Compares = false; + +static constexpr Scale ScalePointer = TimesEight; + +class Assembler : public AssemblerMIPSShared +{ + public: + Assembler() + : AssemblerMIPSShared() + { } + + // MacroAssemblers hold onto gcthings, so they are traced by the GC. + void trace(JSTracer* trc); + + static uintptr_t GetPointer(uint8_t*); + + using AssemblerMIPSShared::bind; + + void bind(RepatchLabel* label); + void Bind(uint8_t* rawCode, CodeOffset* label, const void* address); + + static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); + + void bind(InstImm* inst, uintptr_t branch, uintptr_t target); + + // Copy the assembly code to the given buffer, and perform any pending + // relocations relying on the target address. 
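The alignment constants above fit together as follows; a compile-time restatement, assuming the 8-byte punbox64 Value (a hypothetical stand-in for sizeof(js::Value)):

#include <cstdint>

constexpr uint32_t kJitStackAlignment = 16;
constexpr uint32_t kSizeOfValue = 8;  // assumption: 64-bit boxed Value
constexpr uint32_t kJitStackValueAlignment = kJitStackAlignment / kSizeOfValue;

static_assert(kJitStackAlignment % kSizeOfValue == 0 && kJitStackValueAlignment >= 1,
              "stack alignment must be a non-zero multiple of sizeof(Value)");
static_assert(kJitStackValueAlignment == 2,
              "one 16-byte alignment unit holds exactly two boxed Values");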
+ void executableCopy(uint8_t* buffer); + + static uint32_t PatchWrite_NearCallSize(); + + static uint64_t ExtractLoad64Value(Instruction* inst0); + static void UpdateLoad64Value(Instruction* inst0, uint64_t value); + static void WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value); + + + static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall); + static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, + ImmPtr expectedValue); + static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, + PatchedImmPtr expectedValue); + + static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm); + static uint64_t ExtractInstructionImmediate(uint8_t* code); + + static void ToggleCall(CodeLocationLabel inst_, bool enabled); +}; // Assembler + +static const uint32_t NumIntArgRegs = 8; +static const uint32_t NumFloatArgRegs = NumIntArgRegs; + +static inline bool +GetIntArgReg(uint32_t usedArgSlots, Register* out) +{ + if (usedArgSlots < NumIntArgRegs) { + *out = Register::FromCode(a0.code() + usedArgSlots); + return true; + } + return false; +} + +static inline bool +GetFloatArgReg(uint32_t usedArgSlots, FloatRegister* out) +{ + if (usedArgSlots < NumFloatArgRegs) { + *out = FloatRegister::FromCode(f12.code() + usedArgSlots); + return true; + } + return false; +} + +// Get a register in which we plan to put a quantity that will be used as an +// integer argument. This differs from GetIntArgReg in that if we have no more +// actual argument registers to use we will fall back on using whatever +// CallTempReg* don't overlap the argument registers, and only fail once those +// run out too. +static inline bool +GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out) +{ + // NOTE: We can't properly determine which regs are used if there are + // float arguments. If this is needed, we will have to guess. + MOZ_ASSERT(usedFloatArgs == 0); + + if (GetIntArgReg(usedIntArgs, out)) + return true; + // Unfortunately, we have to assume things about the point at which + // GetIntArgReg returns false, because we need to know how many registers it + // can allocate. + usedIntArgs -= NumIntArgRegs; + if (usedIntArgs >= NumCallTempNonArgRegs) + return false; + *out = CallTempNonArgRegs[usedIntArgs]; + return true; +} + +static inline uint32_t +GetArgStackDisp(uint32_t usedArgSlots) +{ + MOZ_ASSERT(usedArgSlots >= NumIntArgRegs); + return (usedArgSlots - NumIntArgRegs) * sizeof(int64_t); +} + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_Assembler_mips64_h */ diff --git a/js/src/jit/mips64/Bailouts-mips64.cpp b/js/src/jit/mips64/Bailouts-mips64.cpp new file mode 100644 index 000000000..3c6c4c6c4 --- /dev/null +++ b/js/src/jit/mips64/Bailouts-mips64.cpp @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/mips64/Bailouts-mips64.h" + +#include "jscntxt.h" +#include "jscompartment.h" + +using namespace js; +using namespace js::jit; + +BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, + BailoutStack* bailout) + : machine_(bailout->machineState()) +{ + uint8_t* sp = bailout->parentStackPointer(); + framePointer_ = sp + bailout->frameSize(); + topFrameSize_ = framePointer_ - sp; + + JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken()); + topIonScript_ = script->ionScript(); + + attachOnJitActivation(activations); + snapshotOffset_ = bailout->snapshotOffset(); +} diff --git a/js/src/jit/mips64/Bailouts-mips64.h b/js/src/jit/mips64/Bailouts-mips64.h new file mode 100644 index 000000000..1f80b303f --- /dev/null +++ b/js/src/jit/mips64/Bailouts-mips64.h @@ -0,0 +1,44 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_Bailouts_mips64_h +#define jit_mips64_Bailouts_mips64_h + +#include "jit/Bailouts.h" +#include "jit/JitCompartment.h" + +namespace js { +namespace jit { + +class BailoutStack +{ + RegisterDump::FPUArray fpregs_; + RegisterDump::GPRArray regs_; + uintptr_t frameSize_; + uintptr_t snapshotOffset_; + + public: + MachineState machineState() { + return MachineState::FromBailout(regs_, fpregs_); + } + uint32_t snapshotOffset() const { + return snapshotOffset_; + } + uint32_t frameSize() const { + return frameSize_; + } + uint8_t* parentStackPointer() { + return (uint8_t*)this + sizeof(BailoutStack); + } + static size_t offsetOfFrameSize() { + return offsetof(BailoutStack, frameSize_); + } +}; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_Bailouts_mips64_h */ diff --git a/js/src/jit/mips64/BaselineCompiler-mips64.cpp b/js/src/jit/mips64/BaselineCompiler-mips64.cpp new file mode 100644 index 000000000..72535bf1e --- /dev/null +++ b/js/src/jit/mips64/BaselineCompiler-mips64.cpp @@ -0,0 +1,16 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/mips64/BaselineCompiler-mips64.h" + +using namespace js; +using namespace js::jit; + +BaselineCompilerMIPS64::BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc, + JSScript* script) + : BaselineCompilerMIPSShared(cx, alloc, script) +{ +} diff --git a/js/src/jit/mips64/BaselineCompiler-mips64.h b/js/src/jit/mips64/BaselineCompiler-mips64.h new file mode 100644 index 000000000..b06fdbf7a --- /dev/null +++ b/js/src/jit/mips64/BaselineCompiler-mips64.h @@ -0,0 +1,26 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_mips64_BaselineCompiler_mips64_h +#define jit_mips64_BaselineCompiler_mips64_h + +#include "jit/mips-shared/BaselineCompiler-mips-shared.h" + +namespace js { +namespace jit { + +class BaselineCompilerMIPS64 : public BaselineCompilerMIPSShared +{ + protected: + BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc, JSScript* script); +}; + +typedef BaselineCompilerMIPS64 BaselineCompilerSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_BaselineCompiler_mips64_h */ diff --git a/js/src/jit/mips64/BaselineIC-mips64.cpp b/js/src/jit/mips64/BaselineIC-mips64.cpp new file mode 100644 index 000000000..5c0e6d0b7 --- /dev/null +++ b/js/src/jit/mips64/BaselineIC-mips64.cpp @@ -0,0 +1,47 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/BaselineCompiler.h" +#include "jit/BaselineIC.h" +#include "jit/BaselineJIT.h" +#include "jit/Linker.h" +#include "jit/SharedICHelpers.h" + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +// ICCompare_Int32 + +bool +ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + Label conditionTrue; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Compare payload regs of R0 and R1. + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); + masm.ma_cmp_set(R0.valueReg(), ExtractTemp0, ExtractTemp1, cond); + + masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); + EmitReturnFromIC(masm); + + // Failure case - jump to next stub + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +} // namespace jit +} // namespace js diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp new file mode 100644 index 000000000..45f0e69d7 --- /dev/null +++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp @@ -0,0 +1,774 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/mips64/CodeGenerator-mips64.h" + +#include "mozilla/MathAlgorithms.h" + +#include "jit/CodeGenerator.h" +#include "jit/JitCompartment.h" +#include "jit/JitFrames.h" +#include "jit/MIR.h" +#include "jit/MIRGraph.h" +#include "js/Conversions.h" +#include "vm/Shape.h" +#include "vm/TraceLogging.h" + +#include "jit/MacroAssembler-inl.h" +#include "jit/shared/CodeGenerator-shared-inl.h" + +using namespace js; +using namespace js::jit; + +class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS64> +{ + MTableSwitch* mir_; + CodeLabel jumpLabel_; + + void accept(CodeGeneratorMIPS64* codegen) { + codegen->visitOutOfLineTableSwitch(this); + } + + public: + OutOfLineTableSwitch(MTableSwitch* mir) + : mir_(mir) + {} + + MTableSwitch* mir() const { + return mir_; + } + + CodeLabel* jumpLabel() { + return &jumpLabel_; + } +}; + +void +CodeGeneratorMIPS64::visitOutOfLineBailout(OutOfLineBailout* ool) +{ + masm.push(ImmWord(ool->snapshot()->snapshotOffset())); + + masm.jump(&deoptLabel_); +} + +void +CodeGeneratorMIPS64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) +{ + MTableSwitch* mir = ool->mir(); + + masm.haltingAlign(sizeof(void*)); + masm.bind(ool->jumpLabel()->target()); + masm.addCodeLabel(*ool->jumpLabel()); + + for (size_t i = 0; i < mir->numCases(); i++) { + LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir(); + Label* caseheader = caseblock->label(); + uint32_t caseoffset = caseheader->offset(); + + // The entries of the jump table need to be absolute addresses and thus + // must be patched after codegen is finished. Each table entry uses 8 + // instructions (4 for load address, 2 for branch, and 2 padding). + CodeLabel cl; + masm.ma_li(ScratchRegister, cl.patchAt()); + masm.branch(ScratchRegister); + masm.as_nop(); + masm.as_nop(); + cl.target()->bind(caseoffset); + masm.addCodeLabel(cl); + } +} + +void +CodeGeneratorMIPS64::emitTableSwitchDispatch(MTableSwitch* mir, Register index, + Register address) +{ + Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label(); + + // Lower value with low value + if (mir->low() != 0) + masm.subPtr(Imm32(mir->low()), index); + + // Jump to default case if input is out of range + int32_t cases = mir->numCases(); + masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase); + + // To fill in the CodeLabels for the case entries, we need to first + // generate the case entries (we don't yet know their offsets in the + // instruction stream). + OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(mir); + addOutOfLineCode(ool, mir); + + // Compute the position where a pointer to the right case stands. + masm.ma_li(address, ool->jumpLabel()->patchAt()); + // index = size of table entry * index. 
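The index scaling that follows works because each out-of-line table entry emitted in visitOutOfLineTableSwitch above is eight 4-byte instructions, i.e. 32 bytes. A sketch of the arithmetic behind lshiftPtr(Imm32(5), index):

#include <cstdint>

constexpr uint32_t kInstructionsPerEntry = 8;  // 4 load address, 2 branch, 2 padding
constexpr uint32_t kBytesPerEntry = kInstructionsPerEntry * sizeof(uint32_t);
static_assert(kBytesPerEntry == (1u << 5), "entry size must match the shift amount");

static std::uintptr_t EntryAddress(std::uintptr_t tableBase, uint32_t index)
{
    return tableBase + (std::uintptr_t(index) << 5);
}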
+ // See CodeGeneratorMIPS64::visitOutOfLineTableSwitch + masm.lshiftPtr(Imm32(5), index); + masm.addPtr(index, address); + + masm.branch(address); +} + +FrameSizeClass +FrameSizeClass::FromDepth(uint32_t frameDepth) +{ + return FrameSizeClass::None(); +} + +FrameSizeClass +FrameSizeClass::ClassLimit() +{ + return FrameSizeClass(0); +} + +uint32_t +FrameSizeClass::frameSize() const +{ + MOZ_CRASH("MIPS64 does not use frame size classes"); +} + +ValueOperand +CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) +{ + return ValueOperand(ToRegister(ins->getOperand(pos))); +} + +ValueOperand +CodeGeneratorMIPS64::ToOutValue(LInstruction* ins) +{ + return ValueOperand(ToRegister(ins->getDef(0))); +} + +ValueOperand +CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) +{ + return ValueOperand(ToRegister(ins->getTemp(pos))); +} + +void +CodeGeneratorMIPS64::visitBox(LBox* box) +{ + const LAllocation* in = box->getOperand(0); + const LDefinition* result = box->getDef(0); + + if (IsFloatingPointType(box->type())) { + FloatRegister reg = ToFloatRegister(in); + if (box->type() == MIRType::Float32) { + masm.convertFloat32ToDouble(reg, ScratchDoubleReg); + reg = ScratchDoubleReg; + } + masm.moveFromDouble(reg, ToRegister(result)); + } else { + masm.boxValue(ValueTypeFromMIRType(box->type()), ToRegister(in), ToRegister(result)); + } +} + +void +CodeGeneratorMIPS64::visitUnbox(LUnbox* unbox) +{ + MUnbox* mir = unbox->mir(); + + if (mir->fallible()) { + const ValueOperand value = ToValue(unbox, LUnbox::Input); + masm.splitTag(value, SecondScratchReg); + bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(MIRTypeToTag(mir->type())), + unbox->snapshot()); + } + + LAllocation* input = unbox->getOperand(LUnbox::Input); + Register result = ToRegister(unbox->output()); + if (input->isRegister()) { + Register inputReg = ToRegister(input); + switch (mir->type()) { + case MIRType::Int32: + masm.unboxInt32(inputReg, result); + break; + case MIRType::Boolean: + masm.unboxBoolean(inputReg, result); + break; + case MIRType::Object: + masm.unboxObject(inputReg, result); + break; + case MIRType::String: + masm.unboxString(inputReg, result); + break; + case MIRType::Symbol: + masm.unboxSymbol(inputReg, result); + break; + default: + MOZ_CRASH("Given MIRType cannot be unboxed."); + } + return; + } + + Address inputAddr = ToAddress(input); + switch (mir->type()) { + case MIRType::Int32: + masm.unboxInt32(inputAddr, result); + break; + case MIRType::Boolean: + masm.unboxBoolean(inputAddr, result); + break; + case MIRType::Object: + masm.unboxObject(inputAddr, result); + break; + case MIRType::String: + masm.unboxString(inputAddr, result); + break; + case MIRType::Symbol: + masm.unboxSymbol(inputAddr, result); + break; + default: + MOZ_CRASH("Given MIRType cannot be unboxed."); + } +} + +Register +CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value) +{ + MOZ_ASSERT(value.valueReg() != SecondScratchReg); + masm.splitTag(value.valueReg(), SecondScratchReg); + return SecondScratchReg; +} + +void +CodeGeneratorMIPS64::visitCompareB(LCompareB* lir) +{ + MCompare* mir = lir->mir(); + + const ValueOperand lhs = ToValue(lir, LCompareB::Lhs); + const LAllocation* rhs = lir->rhs(); + const Register output = ToRegister(lir->output()); + + MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); + Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); + + // Load boxed boolean in ScratchRegister. 
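visitBox and visitUnbox above rely on the punbox64 layout: a tag above bit 47 (JSVAL_TAG_SHIFT) with the payload in the low bits. A hedged sketch of boxing and unboxing an int32; the tag value here is illustrative, not the engine's actual constant:

#include <cassert>
#include <cstdint>

constexpr unsigned kTagShift = 47;       // JSVAL_TAG_SHIFT
constexpr uint64_t kTagInt32 = 0x1fff1;  // hypothetical Int32 tag value

static uint64_t BoxInt32(int32_t payload)
{
    return (kTagInt32 << kTagShift) | uint32_t(payload);
}

static int32_t UnboxInt32(uint64_t boxed)
{
    // Mirrors the splitTag + bailoutCmp32 guard in the fallible path above.
    assert((boxed >> kTagShift) == kTagInt32);
    return int32_t(uint32_t(boxed));
}

int main()
{
    assert(UnboxInt32(BoxInt32(-7)) == -7);
}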
+ if (rhs->isConstant()) + masm.moveValue(rhs->toConstant()->toJSValue(), ScratchRegister); + else + masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister); + + // Perform the comparison. + masm.cmpPtrSet(cond, lhs.valueReg(), ScratchRegister, output); +} + +void +CodeGeneratorMIPS64::visitCompareBAndBranch(LCompareBAndBranch* lir) +{ + MCompare* mir = lir->cmpMir(); + const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs); + const LAllocation* rhs = lir->rhs(); + + MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE); + + // Load boxed boolean in ScratchRegister. + if (rhs->isConstant()) + masm.moveValue(rhs->toConstant()->toJSValue(), ScratchRegister); + else + masm.boxValue(JSVAL_TYPE_BOOLEAN, ToRegister(rhs), ScratchRegister); + + // Perform the comparison. + Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); + emitBranch(lhs.valueReg(), ScratchRegister, cond, lir->ifTrue(), lir->ifFalse()); +} + +void +CodeGeneratorMIPS64::visitCompareBitwise(LCompareBitwise* lir) +{ + MCompare* mir = lir->mir(); + Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); + const ValueOperand lhs = ToValue(lir, LCompareBitwise::LhsInput); + const ValueOperand rhs = ToValue(lir, LCompareBitwise::RhsInput); + const Register output = ToRegister(lir->output()); + + MOZ_ASSERT(IsEqualityOp(mir->jsop())); + + masm.cmpPtrSet(cond, lhs.valueReg(), rhs.valueReg(), output); +} + +void +CodeGeneratorMIPS64::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir) +{ + MCompare* mir = lir->cmpMir(); + Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop()); + const ValueOperand lhs = ToValue(lir, LCompareBitwiseAndBranch::LhsInput); + const ValueOperand rhs = ToValue(lir, LCompareBitwiseAndBranch::RhsInput); + + MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ || + mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE); + + emitBranch(lhs.valueReg(), rhs.valueReg(), cond, lir->ifTrue(), lir->ifFalse()); +} + +void +CodeGeneratorMIPS64::visitCompareI64(LCompareI64* lir) +{ + MCompare* mir = lir->mir(); + MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 || + mir->compareType() == MCompare::Compare_UInt64); + + const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs); + const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs); + Register lhsReg = ToRegister64(lhs).reg; + Register output = ToRegister(lir->output()); + Register rhsReg; + + if (IsConstant(rhs)) { + rhsReg = ScratchRegister; + masm.ma_li(rhsReg, ImmWord(ToInt64(rhs))); + } else { + rhsReg = ToRegister64(rhs).reg; + } + + bool isSigned = mir->compareType() == MCompare::Compare_Int64; + masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg, output); +} + +void +CodeGeneratorMIPS64::visitCompareI64AndBranch(LCompareI64AndBranch* lir) +{ + MCompare* mir = lir->cmpMir(); + MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 || + mir->compareType() == MCompare::Compare_UInt64); + + const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs); + const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs); + Register lhsReg = ToRegister64(lhs).reg; + Register rhsReg; + + if (IsConstant(rhs)) { + rhsReg = ScratchRegister; + masm.ma_li(rhsReg, ImmWord(ToInt64(rhs))); + } else { + rhsReg = ToRegister64(rhs).reg; + } + + bool isSigned = mir->compareType() == MCompare::Compare_Int64; + Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned); + emitBranch(lhsReg, 
rhsReg, cond, lir->ifTrue(), lir->ifFalse()); +} + +void +CodeGeneratorMIPS64::visitDivOrModI64(LDivOrModI64* lir) +{ + Register lhs = ToRegister(lir->lhs()); + Register rhs = ToRegister(lir->rhs()); + Register output = ToRegister(lir->output()); + + Label done; + + // Handle divide by zero. + if (lir->canBeDivideByZero()) + masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero); + + // Handle an integer overflow exception from INT64_MIN / -1. + if (lir->canBeNegativeOverflow()) { + Label notmin; + masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), ¬min); + masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), ¬min); + if (lir->mir()->isMod()) { + masm.ma_xor(output, output); + } else { + masm.jump(trap(lir, wasm::Trap::IntegerOverflow)); + } + masm.jump(&done); + masm.bind(¬min); + } + + masm.as_ddiv(lhs, rhs); + + if (lir->mir()->isMod()) + masm.as_mfhi(output); + else + masm.as_mflo(output); + + masm.bind(&done); +} + +void +CodeGeneratorMIPS64::visitUDivOrModI64(LUDivOrModI64* lir) +{ + Register lhs = ToRegister(lir->lhs()); + Register rhs = ToRegister(lir->rhs()); + Register output = ToRegister(lir->output()); + + Label done; + + // Prevent divide by zero. + if (lir->canBeDivideByZero()) + masm.ma_b(rhs, rhs, trap(lir, wasm::Trap::IntegerDivideByZero), Assembler::Zero); + + masm.as_ddivu(lhs, rhs); + + if (lir->mir()->isMod()) + masm.as_mfhi(output); + else + masm.as_mflo(output); + + masm.bind(&done); +} + +template <typename T> +void +CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) +{ + const MWasmLoad* mir = lir->mir(); + + MOZ_ASSERT(lir->mir()->type() == MIRType::Int64); + + uint32_t offset = mir->access().offset(); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); + + Register ptr = ToRegister(lir->ptr()); + + // Maybe add the offset. + if (offset) { + Register ptrPlusOffset = ToRegister(lir->ptrCopy()); + masm.addPtr(Imm32(offset), ptrPlusOffset); + ptr = ptrPlusOffset; + } else { + MOZ_ASSERT(lir->ptrCopy()->isBogusTemp()); + } + + unsigned byteSize = mir->access().byteSize(); + bool isSigned; + + switch (mir->access().type()) { + case Scalar::Int8: isSigned = true; break; + case Scalar::Uint8: isSigned = false; break; + case Scalar::Int16: isSigned = true; break; + case Scalar::Uint16: isSigned = false; break; + case Scalar::Int32: isSigned = true; break; + case Scalar::Uint32: isSigned = false; break; + case Scalar::Int64: isSigned = true; break; + default: MOZ_CRASH("unexpected array type"); + } + + masm.memoryBarrier(mir->access().barrierBefore()); + + if (mir->access().isUnaligned()) { + Register temp = ToRegister(lir->getTemp(1)); + + masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne), + temp, static_cast<LoadStoreSize>(8 * byteSize), + isSigned ? SignExtend : ZeroExtend); + return; + } + + masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne), + static_cast<LoadStoreSize>(8 * byteSize), isSigned ? 
SignExtend : ZeroExtend); + + masm.memoryBarrier(mir->access().barrierAfter()); +} + +void +CodeGeneratorMIPS64::visitWasmLoadI64(LWasmLoadI64* lir) +{ + emitWasmLoadI64(lir); +} + +void +CodeGeneratorMIPS64::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) +{ + emitWasmLoadI64(lir); +} + +template <typename T> +void +CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) +{ + const MWasmStore* mir = lir->mir(); + + MOZ_ASSERT(lir->mir()->type() == MIRType::Int64); + + uint32_t offset = mir->access().offset(); + MOZ_ASSERT(offset < wasm::OffsetGuardLimit); + + Register ptr = ToRegister(lir->ptr()); + + // Maybe add the offset. + if (offset) { + Register ptrPlusOffset = ToRegister(lir->ptrCopy()); + masm.addPtr(Imm32(offset), ptrPlusOffset); + ptr = ptrPlusOffset; + } else { + MOZ_ASSERT(lir->ptrCopy()->isBogusTemp()); + } + + unsigned byteSize = mir->access().byteSize(); + bool isSigned; + + switch (mir->access().type()) { + case Scalar::Int8: isSigned = true; break; + case Scalar::Uint8: isSigned = false; break; + case Scalar::Int16: isSigned = true; break; + case Scalar::Uint16: isSigned = false; break; + case Scalar::Int32: isSigned = true; break; + case Scalar::Uint32: isSigned = false; break; + case Scalar::Int64: isSigned = true; break; + default: MOZ_CRASH("unexpected array type"); + } + + masm.memoryBarrier(mir->access().barrierBefore()); + + if (mir->access().isUnaligned()) { + Register temp = ToRegister(lir->getTemp(1)); + + masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne), + temp, static_cast<LoadStoreSize>(8 * byteSize), + isSigned ? SignExtend : ZeroExtend); + return; + } + masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne), + static_cast<LoadStoreSize>(8 * byteSize), isSigned ? 
SignExtend : ZeroExtend); + + masm.memoryBarrier(mir->access().barrierAfter()); +} + +void +CodeGeneratorMIPS64::visitWasmStoreI64(LWasmStoreI64* lir) +{ + emitWasmStoreI64(lir); +} + +void +CodeGeneratorMIPS64::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) +{ + emitWasmStoreI64(lir); +} + +void +CodeGeneratorMIPS64::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins) +{ + const MWasmLoadGlobalVar* mir = ins->mir(); + unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias; + MOZ_ASSERT(mir->type() == MIRType::Int64); + masm.load64(Address(GlobalReg, addr), ToOutRegister64(ins)); +} + +void +CodeGeneratorMIPS64::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins) +{ + const MWasmStoreGlobalVar* mir = ins->mir(); + unsigned addr = mir->globalDataOffset() - WasmGlobalRegBias; + MOZ_ASSERT(mir->value()->type() == MIRType::Int64); + masm.store64(ToRegister64(ins->value()), Address(GlobalReg, addr)); +} + +void +CodeGeneratorMIPS64::visitWasmSelectI64(LWasmSelectI64* lir) +{ + MOZ_ASSERT(lir->mir()->type() == MIRType::Int64); + + Register cond = ToRegister(lir->condExpr()); + const LInt64Allocation falseExpr = lir->falseExpr(); + + Register64 out = ToOutRegister64(lir); + MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input"); + + if (falseExpr.value().isRegister()) { + masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond); + } else { + Label done; + masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump); + masm.loadPtr(ToAddress(falseExpr.value()), out.reg); + masm.bind(&done); + } +} + +void +CodeGeneratorMIPS64::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) +{ + MOZ_ASSERT(lir->mir()->type() == MIRType::Double); + MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64); + masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output())); +} + +void +CodeGeneratorMIPS64::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) +{ + MOZ_ASSERT(lir->mir()->type() == MIRType::Int64); + MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double); + masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input())); +} + +void +CodeGeneratorMIPS64::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) +{ + const LAllocation* input = lir->getOperand(0); + Register output = ToRegister(lir->output()); + + if (lir->mir()->isUnsigned()) + masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32)); + else + masm.ma_sll(output, ToRegister(input), Imm32(0)); +} + +void +CodeGeneratorMIPS64::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) +{ + const LAllocation* input = lir->getOperand(0); + Register output = ToRegister(lir->output()); + + if (lir->mir()->bottomHalf()) { + if (input->isMemory()) + masm.load32(ToAddress(input), output); + else + masm.ma_sll(output, ToRegister(input), Imm32(0)); + } else { + MOZ_CRASH("Not implemented."); + } +} + +void +CodeGeneratorMIPS64::visitClzI64(LClzI64* lir) +{ + Register64 input = ToRegister64(lir->getInt64Operand(0)); + Register64 output = ToOutRegister64(lir); + masm.clz64(input, output.reg); +} + +void +CodeGeneratorMIPS64::visitCtzI64(LCtzI64* lir) +{ + Register64 input = ToRegister64(lir->getInt64Operand(0)); + Register64 output = ToOutRegister64(lir); + masm.ctz64(input, output.reg); +} + +void +CodeGeneratorMIPS64::visitNotI64(LNotI64* lir) +{ + Register64 input = ToRegister64(lir->getInt64Operand(0)); + Register output = ToRegister(lir->output()); + + masm.cmp64Set(Assembler::Equal, input.reg, Imm32(0), output); +} + +void 
+CodeGeneratorMIPS64::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) +{ + FloatRegister input = ToFloatRegister(lir->input()); + Register output = ToRegister(lir->output()); + + MWasmTruncateToInt64* mir = lir->mir(); + MIRType fromType = mir->input()->type(); + + MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32); + + auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input); + addOutOfLineCode(ool, mir); + + if (mir->isUnsigned()) { + Label isLarge, done; + + if (fromType == MIRType::Double) { + masm.loadConstantDouble(double(INT64_MAX), ScratchDoubleReg); + masm.ma_bc1d(ScratchDoubleReg, input, &isLarge, + Assembler::DoubleLessThanOrEqual, ShortJump); + + masm.as_truncld(ScratchDoubleReg, input); + } else { + masm.loadConstantFloat32(float(INT64_MAX), ScratchFloat32Reg); + masm.ma_bc1s(ScratchFloat32Reg, input, &isLarge, + Assembler::DoubleLessThanOrEqual, ShortJump); + + masm.as_truncls(ScratchDoubleReg, input); + } + + // Check that the result is in the uint64_t range. + masm.moveFromDouble(ScratchDoubleReg, output); + masm.as_cfc1(ScratchRegister, Assembler::FCSR); + masm.as_ext(ScratchRegister, ScratchRegister, 16, 1); + masm.ma_dsrl(SecondScratchReg, output, Imm32(63)); + masm.ma_or(SecondScratchReg, ScratchRegister); + masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual); + + masm.ma_b(&done, ShortJump); + + // The input is greater than double(INT64_MAX). + masm.bind(&isLarge); + if (fromType == MIRType::Double) { + masm.as_subd(ScratchDoubleReg, input, ScratchDoubleReg); + masm.as_truncld(ScratchDoubleReg, ScratchDoubleReg); + } else { + masm.as_subs(ScratchDoubleReg, input, ScratchDoubleReg); + masm.as_truncls(ScratchDoubleReg, ScratchDoubleReg); + } + + // Check that the result is in the uint64_t range. + masm.moveFromDouble(ScratchDoubleReg, output); + masm.as_cfc1(ScratchRegister, Assembler::FCSR); + masm.as_ext(ScratchRegister, ScratchRegister, 16, 1); + masm.ma_dsrl(SecondScratchReg, output, Imm32(63)); + masm.ma_or(SecondScratchReg, ScratchRegister); + masm.ma_b(SecondScratchReg, Imm32(0), ool->entry(), Assembler::NotEqual); + + masm.ma_li(ScratchRegister, Imm32(1)); + masm.ma_dins(output, ScratchRegister, Imm32(63), Imm32(1)); + + masm.bind(&done); + return; + } + + // When the input value is Infinity, NaN, or rounds to an integer outside the + // range [INT64_MIN; INT64_MAX + 1[, the Invalid Operation flag is set in the FCSR. + if (fromType == MIRType::Double) + masm.as_truncld(ScratchDoubleReg, input); + else + masm.as_truncls(ScratchDoubleReg, input); + + // Check that the result is in the int64_t range. 
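+    // Reading FCSR with cfc1 and extracting bit 16 (the Invalid Operation
+    // cause bit) detects NaN and out-of-range inputs to the truncation above.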
+ masm.as_cfc1(output, Assembler::FCSR); + masm.as_ext(output, output, 16, 1); + masm.ma_b(output, Imm32(0), ool->entry(), Assembler::NotEqual); + + masm.bind(ool->rejoin()); + masm.moveFromDouble(ScratchDoubleReg, output); +} + +void +CodeGeneratorMIPS64::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) +{ + Register input = ToRegister(lir->input()); + FloatRegister output = ToFloatRegister(lir->output()); + + MIRType outputType = lir->mir()->type(); + MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32); + + if (outputType == MIRType::Double) { + if (lir->mir()->isUnsigned()) + masm.convertUInt64ToDouble(input, output); + else + masm.convertInt64ToDouble(input, output); + } else { + if (lir->mir()->isUnsigned()) + masm.convertUInt64ToFloat32(input, output); + else + masm.convertInt64ToFloat32(input, output); + } +} + +void +CodeGeneratorMIPS64::visitTestI64AndBranch(LTestI64AndBranch* lir) +{ + Register64 input = ToRegister64(lir->getInt64Operand(0)); + MBasicBlock* ifTrue = lir->ifTrue(); + MBasicBlock* ifFalse = lir->ifFalse(); + + emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse); +} + +void +CodeGeneratorMIPS64::setReturnDoubleRegs(LiveRegisterSet* regs) +{ + MOZ_ASSERT(ReturnFloat32Reg.reg_ == FloatRegisters::f0); + MOZ_ASSERT(ReturnDoubleReg.reg_ == FloatRegisters::f0); + FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Single }; + regs->add(ReturnFloat32Reg); + regs->add(f1); + regs->add(ReturnDoubleReg); +} diff --git a/js/src/jit/mips64/CodeGenerator-mips64.h b/js/src/jit/mips64/CodeGenerator-mips64.h new file mode 100644 index 000000000..3c859ef4c --- /dev/null +++ b/js/src/jit/mips64/CodeGenerator-mips64.h @@ -0,0 +1,102 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_mips64_CodeGenerator_mips64_h +#define jit_mips64_CodeGenerator_mips64_h + +#include "jit/mips-shared/CodeGenerator-mips-shared.h" + +namespace js { +namespace jit { + +class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared +{ + protected: + void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + MOZ_ASSERT(value.valueReg() != SecondScratchReg); + masm.splitTag(value.valueReg(), SecondScratchReg); + emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse); + } + void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + MOZ_ASSERT(value.valueReg() != SecondScratchReg); + masm.splitTag(value.valueReg(), SecondScratchReg); + emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse); + } + void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value, + MBasicBlock* ifTrue, MBasicBlock* ifFalse) + { + MOZ_ASSERT(value.valueReg() != SecondScratchReg); + masm.splitTag(value.valueReg(), SecondScratchReg); + emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse); + } + + void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base); + + template <typename T> + void emitWasmLoadI64(T* ins); + template <typename T> + void emitWasmStoreI64(T* ins); + + public: + void visitCompareB(LCompareB* lir); + void visitCompareBAndBranch(LCompareBAndBranch* lir); + void visitCompareBitwise(LCompareBitwise* lir); + void visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir); + void visitCompareI64(LCompareI64* lir); + void visitCompareI64AndBranch(LCompareI64AndBranch* lir); + void visitDivOrModI64(LDivOrModI64* lir); + void visitUDivOrModI64(LUDivOrModI64* lir); + void visitWasmLoadI64(LWasmLoadI64* lir); + void visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir); + void visitWasmStoreI64(LWasmStoreI64* ins); + void visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* ins); + void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins); + void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins); + void visitWasmSelectI64(LWasmSelectI64* ins); + void visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir); + void visitWasmReinterpretToI64(LWasmReinterpretToI64* lir); + void visitExtendInt32ToInt64(LExtendInt32ToInt64* lir); + void visitWrapInt64ToInt32(LWrapInt64ToInt32* lir); + void visitClzI64(LClzI64* lir); + void visitCtzI64(LCtzI64* lir); + void visitNotI64(LNotI64* lir); + void visitWasmTruncateToInt64(LWasmTruncateToInt64* lir); + void visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir); + void visitTestI64AndBranch(LTestI64AndBranch* lir); + + // Out of line visitors. + void visitOutOfLineBailout(OutOfLineBailout* ool); + void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool); + protected: + ValueOperand ToValue(LInstruction* ins, size_t pos); + ValueOperand ToOutValue(LInstruction* ins); + ValueOperand ToTempValue(LInstruction* ins, size_t pos); + + // Functions for LTestVAndBranch. 
+ Register splitTagForTest(const ValueOperand& value); + + public: + CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm) + : CodeGeneratorMIPSShared(gen, graph, masm) + { } + + public: + void visitBox(LBox* box); + void visitUnbox(LUnbox* unbox); + + void setReturnDoubleRegs(LiveRegisterSet* regs); +}; + +typedef CodeGeneratorMIPS64 CodeGeneratorSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_CodeGenerator_mips64_h */ diff --git a/js/src/jit/mips64/LIR-mips64.h b/js/src/jit/mips64/LIR-mips64.h new file mode 100644 index 000000000..b47ff0d59 --- /dev/null +++ b/js/src/jit/mips64/LIR-mips64.h @@ -0,0 +1,140 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_LIR_mips64_h +#define jit_mips64_LIR_mips64_h + +namespace js { +namespace jit { + +class LUnbox : public LInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(Unbox); + + explicit LUnbox(const LAllocation& input) { + setOperand(0, input); + } + + static const size_t Input = 0; + + MUnbox* mir() const { + return mir_->toUnbox(); + } + const char* extraName() const { + return StringFromMIRType(mir()->type()); + } +}; + +class LUnboxFloatingPoint : public LUnbox +{ + MIRType type_; + + public: + LIR_HEADER(UnboxFloatingPoint); + + LUnboxFloatingPoint(const LAllocation& input, MIRType type) + : LUnbox(input), + type_(type) + { } + + MIRType type() const { + return type_; + } +}; + +class LDivOrModI64 : public LBinaryMath<1> +{ + public: + LIR_HEADER(DivOrModI64) + + LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp); + } + + const LDefinition* remainder() { + return getTemp(0); + } + + MBinaryArithInstruction* mir() const { + MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); + return static_cast<MBinaryArithInstruction*>(mir_); + } + bool canBeDivideByZero() const { + if (mir_->isMod()) + return mir_->toMod()->canBeDivideByZero(); + return mir_->toDiv()->canBeDivideByZero(); + } + bool canBeNegativeOverflow() const { + if (mir_->isMod()) + return mir_->toMod()->canBeNegativeDividend(); + return mir_->toDiv()->canBeNegativeOverflow(); + } + wasm::TrapOffset trapOffset() const { + MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); + if (mir_->isMod()) + return mir_->toMod()->trapOffset(); + return mir_->toDiv()->trapOffset(); + } +}; + +class LUDivOrModI64 : public LBinaryMath<1> +{ + public: + LIR_HEADER(UDivOrModI64); + + LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) { + setOperand(0, lhs); + setOperand(1, rhs); + setTemp(0, temp); + } + + const LDefinition* remainder() { + return getTemp(0); + } + + const char* extraName() const { + return mir()->isTruncated() ? 
"Truncated" : nullptr; + } + + MBinaryArithInstruction* mir() const { + MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); + return static_cast<MBinaryArithInstruction*>(mir_); + } + + bool canBeDivideByZero() const { + if (mir_->isMod()) + return mir_->toMod()->canBeDivideByZero(); + return mir_->toDiv()->canBeDivideByZero(); + } + wasm::TrapOffset trapOffset() const { + MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); + if (mir_->isMod()) + return mir_->toMod()->trapOffset(); + return mir_->toDiv()->trapOffset(); + } +}; + +class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> +{ + public: + LIR_HEADER(WasmTruncateToInt64); + + explicit LWasmTruncateToInt64(const LAllocation& in) { + setOperand(0, in); + } + + MWasmTruncateToInt64* mir() const { + return mir_->toWasmTruncateToInt64(); + } +}; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_LIR_mips64_h */ diff --git a/js/src/jit/mips64/LOpcodes-mips64.h b/js/src/jit/mips64/LOpcodes-mips64.h new file mode 100644 index 000000000..166bfb1b1 --- /dev/null +++ b/js/src/jit/mips64/LOpcodes-mips64.h @@ -0,0 +1,24 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_LOpcodes_mips64_h__ +#define jit_mips64_LOpcodes_mips64_h__ + +#include "jit/shared/LOpcodes-shared.h" + +#define LIR_CPU_OPCODE_LIST(_) \ + _(ModMaskI) \ + _(DivOrModI64) \ + _(UDivOrMod) \ + _(UDivOrModI64) \ + _(WasmUnalignedLoad) \ + _(WasmUnalignedStore) \ + _(WasmUnalignedLoadI64) \ + _(WasmUnalignedStoreI64) \ + _(WasmTruncateToInt64) \ + _(Int64ToFloatingPoint) + +#endif // jit_mips64_LOpcodes_mips64_h__ diff --git a/js/src/jit/mips64/Lowering-mips64.cpp b/js/src/jit/mips64/Lowering-mips64.cpp new file mode 100644 index 000000000..bcc61163f --- /dev/null +++ b/js/src/jit/mips64/Lowering-mips64.cpp @@ -0,0 +1,184 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "jit/mips64/Lowering-mips64.h" + +#include "jit/mips64/Assembler-mips64.h" + +#include "jit/MIR.h" + +#include "jit/shared/Lowering-shared-inl.h" + +using namespace js; +using namespace js::jit; + +void +LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex) +{ + defineTypedPhi(phi, lirIndex); +} + +void +LIRGeneratorMIPS64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, + LBlock* block, size_t lirIndex) +{ + lowerTypedPhiInput(phi, inputPosition, block, lirIndex); +} + +LBoxAllocation +LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart) +{ + MOZ_ASSERT(mir->type() == MIRType::Value); + + ensureDefined(mir); + return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart)); +} + +void +LIRGeneratorMIPS64::lowerDivI64(MDiv* div) +{ + if (div->isUnsigned()) { + lowerUDivI64(div); + return; + } + + LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), + temp()); + defineInt64(lir, div); +} + +void +LIRGeneratorMIPS64::lowerModI64(MMod* mod) +{ + if (mod->isUnsigned()) { + lowerUModI64(mod); + return; + } + + LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), + temp()); + defineInt64(lir, mod); +} + +void +LIRGeneratorMIPS64::lowerUDivI64(MDiv* div) +{ + LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()), + useRegister(div->rhs()), + temp()); + defineInt64(lir, div); +} + +void +LIRGeneratorMIPS64::lowerUModI64(MMod* mod) +{ + LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()), + useRegister(mod->rhs()), + temp()); + defineInt64(lir, mod); +} + +void +LIRGeneratorMIPS64::visitBox(MBox* box) +{ + MDefinition* opd = box->getOperand(0); + + // If the operand is a constant, emit near its uses. + if (opd->isConstant() && box->canEmitAtUses()) { + emitAtUses(box); + return; + } + + if (opd->isConstant()) { + define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX)); + } else { + LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type()); + define(ins, box, LDefinition(LDefinition::BOX)); + } +} + +void +LIRGeneratorMIPS64::visitUnbox(MUnbox* unbox) +{ + MDefinition* box = unbox->getOperand(0); + + if (box->type() == MIRType::ObjectOrNull) { + LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box)); + if (unbox->fallible()) + assignSnapshot(lir, unbox->bailoutKind()); + defineReuseInput(lir, unbox, 0); + return; + } + + MOZ_ASSERT(box->type() == MIRType::Value); + + LUnbox* lir; + if (IsFloatingPointType(unbox->type())) { + lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type()); + } else if (unbox->fallible()) { + // If the unbox is fallible, load the Value in a register first to + // avoid multiple loads. 
+ lir = new(alloc()) LUnbox(useRegisterAtStart(box)); + } else { + lir = new(alloc()) LUnbox(useAtStart(box)); + } + + if (unbox->fallible()) + assignSnapshot(lir, unbox->bailoutKind()); + + define(lir, unbox); +} + +void +LIRGeneratorMIPS64::visitReturn(MReturn* ret) +{ + MDefinition* opd = ret->getOperand(0); + MOZ_ASSERT(opd->type() == MIRType::Value); + + LReturn* ins = new(alloc()) LReturn; + ins->setOperand(0, useFixed(opd, JSReturnReg)); + add(ins); +} + +void +LIRGeneratorMIPS64::defineUntypedPhi(MPhi* phi, size_t lirIndex) +{ + defineTypedPhi(phi, lirIndex); +} + +void +LIRGeneratorMIPS64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, + LBlock* block, size_t lirIndex) +{ + lowerTypedPhiInput(phi, inputPosition, block, lirIndex); +} + +void +LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins) +{ + MDefinition* opd = ins->input(); + MOZ_ASSERT(opd->type() == MIRType::Double); + + define(new(alloc()) + LTruncateDToInt32(useRegister(opd), tempDouble()), ins); +} + +void +LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins) +{ + MDefinition* opd = ins->input(); + MOZ_ASSERT(opd->type() == MIRType::Float32); + + define(new(alloc()) + LTruncateFToInt32(useRegister(opd), tempFloat32()), ins); +} + +void +LIRGeneratorMIPS64::visitRandom(MRandom* ins) +{ + LRandom *lir = new(alloc()) LRandom(temp(), temp(), temp()); + defineFixed(lir, ins, LFloatReg(ReturnDoubleReg)); +} diff --git a/js/src/jit/mips64/Lowering-mips64.h b/js/src/jit/mips64/Lowering-mips64.h new file mode 100644 index 000000000..7427f1ecb --- /dev/null +++ b/js/src/jit/mips64/Lowering-mips64.h @@ -0,0 +1,57 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_Lowering_mips64_h +#define jit_mips64_Lowering_mips64_h + +#include "jit/mips-shared/Lowering-mips-shared.h" + +namespace js { +namespace jit { + +class LIRGeneratorMIPS64 : public LIRGeneratorMIPSShared +{ + protected: + LIRGeneratorMIPS64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph) + : LIRGeneratorMIPSShared(gen, graph, lirGraph) + { } + + protected: + void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t); + void defineInt64Phi(MPhi*, size_t); + + // Returns a box allocation. reg2 is ignored on 64-bit platforms. 
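+    // (On MIPS64 a boxed Value fits in a single register, so only reg1 is used.)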
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2, + bool useAtStart = false); + + inline LDefinition tempToUnbox() { + return temp(); + } + + void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex); + void defineUntypedPhi(MPhi* phi, size_t lirIndex); + + void lowerTruncateDToInt32(MTruncateToInt32* ins); + void lowerTruncateFToInt32(MTruncateToInt32* ins); + + void lowerDivI64(MDiv* div); + void lowerModI64(MMod* mod); + void lowerUDivI64(MDiv* div); + void lowerUModI64(MMod* mod); + + public: + void visitBox(MBox* box); + void visitUnbox(MUnbox* unbox); + void visitReturn(MReturn* ret); + void visitRandom(MRandom* ins); +}; + +typedef LIRGeneratorMIPS64 LIRGeneratorSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_Lowering_mips64_h */ diff --git a/js/src/jit/mips64/MacroAssembler-mips64-inl.h b/js/src/jit/mips64/MacroAssembler-mips64-inl.h new file mode 100644 index 000000000..f5737748b --- /dev/null +++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h @@ -0,0 +1,774 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_MacroAssembler_mips64_inl_h +#define jit_mips64_MacroAssembler_mips64_inl_h + +#include "jit/mips64/MacroAssembler-mips64.h" + +#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h" + +namespace js { +namespace jit { + +//{{{ check_macroassembler_style + +void +MacroAssembler::move64(Register64 src, Register64 dest) +{ + movePtr(src.reg, dest.reg); +} + +void +MacroAssembler::move64(Imm64 imm, Register64 dest) +{ + movePtr(ImmWord(imm.value), dest.reg); +} + +// =============================================================== +// Logical instructions + +void +MacroAssembler::andPtr(Register src, Register dest) +{ + ma_and(dest, src); +} + +void +MacroAssembler::andPtr(Imm32 imm, Register dest) +{ + ma_and(dest, imm); +} + +void +MacroAssembler::and64(Imm64 imm, Register64 dest) +{ + ma_li(ScratchRegister, ImmWord(imm.value)); + ma_and(dest.reg, ScratchRegister); +} + +void +MacroAssembler::and64(Register64 src, Register64 dest) +{ + ma_and(dest.reg, src.reg); +} + +void +MacroAssembler::and64(const Operand& src, Register64 dest) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + and64(scratch, dest); + } else { + and64(Register64(src.toReg()), dest); + } +} + +void +MacroAssembler::or64(Imm64 imm, Register64 dest) +{ + ma_li(ScratchRegister, ImmWord(imm.value)); + ma_or(dest.reg, ScratchRegister); +} + +void +MacroAssembler::xor64(Imm64 imm, Register64 dest) +{ + ma_li(ScratchRegister, ImmWord(imm.value)); + ma_xor(dest.reg, ScratchRegister); +} + +void +MacroAssembler::orPtr(Register src, Register dest) +{ + ma_or(dest, src); +} + +void +MacroAssembler::orPtr(Imm32 imm, Register dest) +{ + ma_or(dest, imm); +} + +void +MacroAssembler::or64(Register64 src, Register64 dest) +{ + ma_or(dest.reg, src.reg); +} + +void +MacroAssembler::or64(const Operand& src, Register64 dest) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + or64(scratch, dest); + } else { + or64(Register64(src.toReg()), dest); + } +} + +void +MacroAssembler::xor64(Register64 src, Register64 
dest) +{ + ma_xor(dest.reg, src.reg); +} + +void +MacroAssembler::xor64(const Operand& src, Register64 dest) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + xor64(scratch, dest); + } else { + xor64(Register64(src.toReg()), dest); + } +} + +void +MacroAssembler::xorPtr(Register src, Register dest) +{ + ma_xor(dest, src); +} + +void +MacroAssembler::xorPtr(Imm32 imm, Register dest) +{ + ma_xor(dest, imm); +} + +// =============================================================== +// Arithmetic functions + +void +MacroAssembler::addPtr(Register src, Register dest) +{ + ma_daddu(dest, src); +} + +void +MacroAssembler::addPtr(Imm32 imm, Register dest) +{ + ma_daddu(dest, imm); +} + +void +MacroAssembler::addPtr(ImmWord imm, Register dest) +{ + movePtr(imm, ScratchRegister); + addPtr(ScratchRegister, dest); +} + +void +MacroAssembler::add64(Register64 src, Register64 dest) +{ + addPtr(src.reg, dest.reg); +} + +void +MacroAssembler::add64(const Operand& src, Register64 dest) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + add64(scratch, dest); + } else { + add64(Register64(src.toReg()), dest); + } +} + +void +MacroAssembler::add64(Imm32 imm, Register64 dest) +{ + ma_daddu(dest.reg, imm); +} + +void +MacroAssembler::add64(Imm64 imm, Register64 dest) +{ + MOZ_ASSERT(dest.reg != ScratchRegister); + mov(ImmWord(imm.value), ScratchRegister); + ma_daddu(dest.reg, ScratchRegister); +} + +void +MacroAssembler::subPtr(Register src, Register dest) +{ + as_dsubu(dest, dest, src); +} + +void +MacroAssembler::subPtr(Imm32 imm, Register dest) +{ + ma_dsubu(dest, dest, imm); +} + +void +MacroAssembler::sub64(Register64 src, Register64 dest) +{ + as_dsubu(dest.reg, dest.reg, src.reg); +} + +void +MacroAssembler::sub64(const Operand& src, Register64 dest) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + sub64(scratch, dest); + } else { + sub64(Register64(src.toReg()), dest); + } +} + +void +MacroAssembler::sub64(Imm64 imm, Register64 dest) +{ + MOZ_ASSERT(dest.reg != ScratchRegister); + mov(ImmWord(imm.value), ScratchRegister); + as_dsubu(dest.reg, dest.reg, ScratchRegister); +} + +void +MacroAssembler::mul64(Imm64 imm, const Register64& dest) +{ + MOZ_ASSERT(dest.reg != ScratchRegister); + mov(ImmWord(imm.value), ScratchRegister); + as_dmultu(dest.reg, ScratchRegister); + as_mflo(dest.reg); +} + +void +MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + mul64(imm, dest); +} + +void +MacroAssembler::mul64(const Register64& src, const Register64& dest, const Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + as_dmultu(dest.reg, src.reg); + as_mflo(dest.reg); +} + +void +MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register temp) +{ + if (src.getTag() == Operand::MEM) { + Register64 scratch(ScratchRegister); + + load64(src.toAddress(), scratch); + mul64(scratch, dest, temp); + } else { + mul64(Register64(src.toReg()), dest, temp); + } +} + +void +MacroAssembler::mulBy3(Register src, Register dest) +{ + as_daddu(dest, src, src); + as_daddu(dest, dest, src); +} + +void +MacroAssembler::inc64(AbsoluteAddress dest) +{ + ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr))); + as_ld(SecondScratchReg, ScratchRegister, 0); + as_daddiu(SecondScratchReg, SecondScratchReg, 1); + as_sd(SecondScratchReg, ScratchRegister, 0); 
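+    // Note: this ld/daddiu/sd sequence is not atomic.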
+} + +void +MacroAssembler::neg64(Register64 reg) +{ + as_dsubu(reg.reg, zero, reg.reg); +} + +// =============================================================== +// Shift functions + +void +MacroAssembler::lshiftPtr(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsll(dest, dest, imm); +} + +void +MacroAssembler::lshift64(Imm32 imm, Register64 dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsll(dest.reg, dest.reg, imm); +} + +void +MacroAssembler::lshift64(Register shift, Register64 dest) +{ + ma_dsll(dest.reg, dest.reg, shift); +} + +void +MacroAssembler::rshiftPtr(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsrl(dest, dest, imm); +} + +void +MacroAssembler::rshift64(Imm32 imm, Register64 dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsrl(dest.reg, dest.reg, imm); +} + +void +MacroAssembler::rshift64(Register shift, Register64 dest) +{ + ma_dsrl(dest.reg, dest.reg, shift); +} + +void +MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsra(dest, dest, imm); +} + +void +MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) +{ + MOZ_ASSERT(0 <= imm.value && imm.value < 64); + ma_dsra(dest.reg, dest.reg, imm); +} + +void +MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) +{ + ma_dsra(dest.reg, dest.reg, shift); +} + +// =============================================================== +// Rotation functions + +void +MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest, Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + + if (count.value) + ma_drol(dest.reg, src.reg, count); + else + ma_move(dest.reg, src.reg); +} + +void +MacroAssembler::rotateLeft64(Register count, Register64 src, Register64 dest, Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + ma_drol(dest.reg, src.reg, count); +} + +void +MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest, Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + + if (count.value) + ma_dror(dest.reg, src.reg, count); + else + ma_move(dest.reg, src.reg); +} + +void +MacroAssembler::rotateRight64(Register count, Register64 src, Register64 dest, Register temp) +{ + MOZ_ASSERT(temp == InvalidReg); + ma_dror(dest.reg, src.reg, count); +} + +// =============================================================== +// Condition functions + +template <typename T1, typename T2> +void +MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) +{ + ma_cmp_set(dest, lhs, rhs, cond); +} + +// Also see below for specializations of cmpPtrSet. 
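+//
+// For example, cmpPtrSet(Assembler::Equal, lhs, rhs, dest) materializes the
+// boolean result of (lhs == rhs) as 0 or 1 in dest via ma_cmp_set.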
+ +template <typename T1, typename T2> +void +MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) +{ + ma_cmp_set(dest, lhs, rhs, cond); +} + +// =============================================================== +// Bit counting functions + +void +MacroAssembler::clz64(Register64 src, Register dest) +{ + as_dclz(dest, src.reg); +} + +void +MacroAssembler::ctz64(Register64 src, Register dest) +{ + ma_dctz(dest, src.reg); +} + +void +MacroAssembler::popcnt64(Register64 input, Register64 output, Register tmp) +{ + ma_move(output.reg, input.reg); + ma_dsra(tmp, input.reg, Imm32(1)); + ma_li(ScratchRegister, ImmWord(0x5555555555555555UL)); + ma_and(tmp, ScratchRegister); + ma_dsubu(output.reg, tmp); + ma_dsra(tmp, output.reg, Imm32(2)); + ma_li(ScratchRegister, ImmWord(0x3333333333333333UL)); + ma_and(output.reg, ScratchRegister); + ma_and(tmp, ScratchRegister); + ma_daddu(output.reg, tmp); + ma_dsrl(tmp, output.reg, Imm32(4)); + ma_daddu(output.reg, tmp); + ma_li(ScratchRegister, ImmWord(0xF0F0F0F0F0F0F0FUL)); + ma_and(output.reg, ScratchRegister); + ma_dsll(tmp, output.reg, Imm32(8)); + ma_daddu(output.reg, tmp); + ma_dsll(tmp, output.reg, Imm32(16)); + ma_daddu(output.reg, tmp); + ma_dsll(tmp, output.reg, Imm32(32)); + ma_daddu(output.reg, tmp); + ma_dsra(output.reg, output.reg, Imm32(56)); +} + +// =============================================================== +// Branch functions + +void +MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val, Label* success, Label* fail) +{ + MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal || + cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual || + cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual || + cond == Assembler::Below || cond == Assembler::BelowOrEqual || + cond == Assembler::Above || cond == Assembler::AboveOrEqual, + "other condition codes not supported"); + + branchPtr(cond, lhs.reg, ImmWord(val.value), success); + if (fail) + jump(fail); +} + +void +MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success, Label* fail) +{ + MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal || + cond == Assembler::LessThan || cond == Assembler::LessThanOrEqual || + cond == Assembler::GreaterThan || cond == Assembler::GreaterThanOrEqual || + cond == Assembler::Below || cond == Assembler::BelowOrEqual || + cond == Assembler::Above || cond == Assembler::AboveOrEqual, + "other condition codes not supported"); + + branchPtr(cond, lhs.reg, rhs.reg, success); + if (fail) + jump(fail); +} + +void +MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) +{ + MOZ_ASSERT(cond == Assembler::NotEqual, + "other condition codes not supported"); + + branchPtr(cond, lhs, ImmWord(val.value), label); +} + +void +MacroAssembler::branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch, + Label* label) +{ + MOZ_ASSERT(cond == Assembler::NotEqual, + "other condition codes not supported"); + MOZ_ASSERT(lhs.base != scratch); + MOZ_ASSERT(rhs.base != scratch); + + loadPtr(rhs, scratch); + branchPtr(cond, lhs, scratch, label); +} + +void +MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) +{ + if (rhs != ScratchRegister) + movePtr(rhs, ScratchRegister); + // Instead of unboxing lhs, box rhs and do direct comparison with lhs. 
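+    // Private pointers are boxed by storing the pointer shifted right by one
+    // bit, so shifting rhs right by 1 produces the form lhs holds in memory.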
+ rshiftPtr(Imm32(1), ScratchRegister); + branchPtr(cond, lhs, ScratchRegister, label); +} + +template <class L> +void +MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp, + L label) +{ + branchTestPtr(cond, lhs.reg, rhs.reg, label); +} + +void +MacroAssembler::branchTestUndefined(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestUndefined(cond, scratch2, label); +} + +void +MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestInt32(cond, scratch2, label); +} + +void +MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value, Label* label) +{ + ScratchRegisterScope scratch(*this); + ma_dext(scratch, value.valueReg(), Imm32(0), Imm32(32)); + ma_b(scratch, scratch, label, b ? NonZero : Zero); +} + +void +MacroAssembler::branchTestDouble(Condition cond, Register tag, Label* label) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + Condition actual = (cond == Equal) ? BelowOrEqual : Above; + ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual); +} + +void +MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestDouble(cond, scratch2, label); +} + +void +MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestNumber(cond, scratch2, label); +} + +void +MacroAssembler::branchTestBoolean(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestBoolean(cond, scratch2, label); +} + +void +MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + unboxBoolean(value, scratch2); + ma_b(scratch2, scratch2, label, b ? NonZero : Zero); +} + +void +MacroAssembler::branchTestString(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestString(cond, scratch2, label); +} + +void +MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + unboxString(value, scratch2); + load32(Address(scratch2, JSString::offsetOfLength()), scratch2); + ma_b(scratch2, Imm32(0), label, b ? 
NotEqual : Equal); +} + +void +MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestSymbol(cond, scratch2, label); +} + +void +MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestNull(cond, scratch2, label); +} + +void +MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestObject(cond, scratch2, label); +} + +void +MacroAssembler::branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + branchTestPrimitive(cond, scratch2, label); +} + +template <class L> +void +MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value, L label) +{ + SecondScratchRegisterScope scratch2(*this); + splitTag(value, scratch2); + ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond); +} + +void +MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) +{ + uint64_t magic = MagicValue(why).asRawBits(); + ScratchRegisterScope scratch(*this); + loadPtr(valaddr, scratch); + ma_b(scratch, ImmWord(magic), label, cond); +} + +// ======================================================================== +// Memory access primitives. +void +MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr) +{ + ma_sd(src, addr); +} +void +MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr) +{ + MOZ_ASSERT(addr.offset == 0); + ma_sd(src, addr); +} + +void +MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr) +{ + ma_ss(src, addr); +} +void +MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr) +{ + MOZ_ASSERT(addr.offset == 0); + ma_ss(src, addr); +} + +// ======================================================================== +// wasm support + +template <class L> +void +MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label) +{ + BufferOffset bo = ma_BoundsCheck(ScratchRegister); + append(wasm::BoundsCheck(bo.getOffset())); + + ma_b(index, ScratchRegister, label, cond); +} + +void +MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit) +{ + // Replace with new value + Assembler::UpdateLoad64Value((Instruction*) patchAt, limit); +} + +//}}} check_macroassembler_style +// =============================================================== + +// The specializations for cmpPtrSet are outside the braces because check_macroassembler_style can't yet +// deal with specializations. 
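+//
+// Each specialization below simply loads the memory operand into a scratch
+// register and then defers to the register-register form above.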
+ +template<> +inline void +MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs, + Register dest) +{ + loadPtr(lhs, ScratchRegister); + movePtr(rhs, SecondScratchReg); + cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest); +} + +template<> +inline void +MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs, + Register dest) +{ + loadPtr(rhs, ScratchRegister); + cmpPtrSet(cond, lhs, ScratchRegister, dest); +} + +template<> +inline void +MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, + Register dest) +{ + load32(rhs, ScratchRegister); + cmp32Set(cond, lhs, ScratchRegister, dest); +} + +void +MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr) +{ + asMasm().add32(Imm32(1), addr); +} + +void +MacroAssemblerMIPS64Compat::computeEffectiveAddress(const BaseIndex& address, Register dest) +{ + computeScaledAddress(address, dest); + if (address.offset) + asMasm().addPtr(Imm32(address.offset), dest); +} + +void +MacroAssemblerMIPS64Compat::retn(Imm32 n) +{ + // pc <- [sp]; sp += n + loadPtr(Address(StackPointer, 0), ra); + asMasm().addPtr(n, StackPointer); + as_jr(ra); + as_nop(); +} + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_MacroAssembler_mips64_inl_h */ diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp new file mode 100644 index 000000000..329fa83f8 --- /dev/null +++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp @@ -0,0 +1,2485 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/mips64/MacroAssembler-mips64.h" + +#include "mozilla/DebugOnly.h" +#include "mozilla/MathAlgorithms.h" + +#include "jit/Bailouts.h" +#include "jit/BaselineFrame.h" +#include "jit/JitFrames.h" +#include "jit/MacroAssembler.h" +#include "jit/mips64/Simulator-mips64.h" +#include "jit/MoveEmitter.h" +#include "jit/SharedICRegisters.h" + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace jit; + +using mozilla::Abs; + +static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean."); + +void +MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src, Register dest) +{ + // Note that C++ bool is only 1 byte, so zero extend it to clear the + // higher-order bits. + ma_and(dest, src, Imm32(0xff)); +} + +void +MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src, FloatRegister dest) +{ + as_mtc1(src, dest); + as_cvtdw(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src, FloatRegister dest) +{ + ma_ls(dest, src); + as_cvtdw(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) +{ + computeScaledAddress(src, ScratchRegister); + convertInt32ToDouble(Address(ScratchRegister, src.offset), dest); +} + +void +MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src, FloatRegister dest) +{ + // We use SecondScratchDoubleReg because MacroAssembler::loadFromTypedArray + // calls with ScratchDoubleReg as dest. 
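+    // E.g. src = 0xffffffff: the bias leaves 0x7fffffff, which converts
+    // exactly, and adding 2147483648.0 back yields 4294967295.0.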
+ MOZ_ASSERT(dest != SecondScratchDoubleReg); + + // Subtract INT32_MIN to get a positive number + ma_subu(ScratchRegister, src, Imm32(INT32_MIN)); + + // Convert value + as_mtc1(ScratchRegister, dest); + as_cvtdw(dest, dest); + + // Add unsigned value of INT32_MIN + ma_lid(SecondScratchDoubleReg, 2147483648.0); + as_addd(dest, dest, SecondScratchDoubleReg); +} + +void +MacroAssemblerMIPS64Compat::convertInt64ToDouble(Register src, FloatRegister dest) +{ + as_dmtc1(src, dest); + as_cvtdl(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::convertInt64ToFloat32(Register src, FloatRegister dest) +{ + as_dmtc1(src, dest); + as_cvtsl(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src, FloatRegister dest) +{ + Label positive, done; + ma_b(src, src, &positive, NotSigned, ShortJump); + + MOZ_ASSERT(src!= ScratchRegister); + MOZ_ASSERT(src!= SecondScratchReg); + + ma_and(ScratchRegister, src, Imm32(1)); + ma_dsrl(SecondScratchReg, src, Imm32(1)); + ma_or(ScratchRegister, SecondScratchReg); + as_dmtc1(ScratchRegister, dest); + as_cvtdl(dest, dest); + asMasm().addDouble(dest, dest); + ma_b(&done, ShortJump); + + bind(&positive); + as_dmtc1(src, dest); + as_cvtdl(dest, dest); + + bind(&done); +} + +void +MacroAssemblerMIPS64Compat::convertUInt64ToFloat32(Register src, FloatRegister dest) +{ + Label positive, done; + ma_b(src, src, &positive, NotSigned, ShortJump); + + MOZ_ASSERT(src!= ScratchRegister); + MOZ_ASSERT(src!= SecondScratchReg); + + ma_and(ScratchRegister, src, Imm32(1)); + ma_dsrl(SecondScratchReg, src, Imm32(1)); + ma_or(ScratchRegister, SecondScratchReg); + as_dmtc1(ScratchRegister, dest); + as_cvtsl(dest, dest); + asMasm().addFloat32(dest, dest); + ma_b(&done, ShortJump); + + bind(&positive); + as_dmtc1(src, dest); + as_cvtsl(dest, dest); + + bind(&done); +} + +bool +MacroAssemblerMIPS64Compat::convertUInt64ToDoubleNeedsTemp() +{ + return false; +} + +void +MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp) +{ + convertUInt64ToDouble(src.reg, dest); +} + +void +MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src, FloatRegister dest) +{ + Label positive, done; + ma_b(src, src, &positive, NotSigned, ShortJump); + + // We cannot do the same as convertUInt32ToDouble because float32 doesn't + // have enough precision. + convertUInt32ToDouble(src, dest); + convertDoubleToFloat32(dest, dest); + ma_b(&done, ShortJump); + + bind(&positive); + convertInt32ToFloat32(src, dest); + + bind(&done); +} + +void +MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src, FloatRegister dest) +{ + as_cvtsd(dest, src); +} + +// Checks whether a double is representable as a 32-bit integer. If so, the +// integer is written to the output register. Otherwise, a bailout is taken to +// the given snapshot. This function overwrites the scratch float register. +void +MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src, Register dest, + Label* fail, bool negativeZeroCheck) +{ + if (negativeZeroCheck) { + moveFromDouble(src, dest); + ma_drol(dest, dest, Imm32(1)); + ma_b(dest, Imm32(1), fail, Assembler::Equal); + } + + // Convert double to int, then convert back and check if we have the + // same number. + as_cvtwd(ScratchDoubleReg, src); + as_mfc1(dest, ScratchDoubleReg); + as_cvtdw(ScratchDoubleReg, ScratchDoubleReg); + ma_bc1d(src, ScratchDoubleReg, fail, Assembler::DoubleNotEqualOrUnordered); +} + +// Checks whether a float32 is representable as a 32-bit integer. 
If so, the +// integer is written to the output register. Otherwise, a bailout is taken to +// the given snapshot. This function overwrites the scratch float register. +void +MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src, Register dest, + Label* fail, bool negativeZeroCheck) +{ + if (negativeZeroCheck) { + moveFromFloat32(src, dest); + ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal); + } + + // Converting the floating point value to an integer and then converting it + // back to a float32 would not work, as float to int32 conversions are + // clamping (e.g. float(INT32_MAX + 1) would get converted into INT32_MAX + // and then back to float(INT32_MAX + 1)). If this ever happens, we just + // bail out. + as_cvtws(ScratchFloat32Reg, src); + as_mfc1(dest, ScratchFloat32Reg); + as_cvtsw(ScratchFloat32Reg, ScratchFloat32Reg); + ma_bc1s(src, ScratchFloat32Reg, fail, Assembler::DoubleNotEqualOrUnordered); + + // Bail out in the clamped cases. + ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal); +} + +void +MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src, FloatRegister dest) +{ + as_cvtds(dest, src); +} + +void +MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src, FloatRegister dest) +{ + as_mtc1(src, dest); + as_cvtsw(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src, FloatRegister dest) +{ + ma_ls(dest, src); + as_cvtsw(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::movq(Register rs, Register rd) +{ + ma_move(rd, rs); +} + +void +MacroAssemblerMIPS64::ma_li(Register dest, CodeOffset* label) +{ + BufferOffset bo = m_buffer.nextOffset(); + ma_liPatchable(dest, ImmWord(/* placeholder */ 0)); + label->bind(bo.getOffset()); +} + +void +MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) +{ + int64_t value = imm.value; + + if (value >= INT16_MIN && value <= INT16_MAX) { + as_addiu(dest, zero, value); + } else if (imm.value <= UINT16_MAX) { + as_ori(dest, zero, Imm16::Lower(Imm32(value)).encode()); + } else if (value >= INT32_MIN && value <= INT32_MAX) { + as_lui(dest, Imm16::Upper(Imm32(value)).encode()); + if (value & 0xffff) + as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode()); + } else if (imm.value <= UINT32_MAX) { + as_lui(dest, Imm16::Upper(Imm32(value)).encode()); + if (value & 0xffff) + as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode()); + as_dinsu(dest, zero, 32, 32); + } else { + uint64_t high = imm.value >> 32; + + if (imm.value >> 48) { + as_lui(dest, Imm16::Upper(Imm32(high)).encode()); + if (high & 0xffff) + as_ori(dest, dest, Imm16::Lower(Imm32(high)).encode()); + as_dsll(dest, dest, 16); + } else { + as_lui(dest, Imm16::Lower(Imm32(high)).encode()); + } + if ((imm.value >> 16) & 0xffff) + as_ori(dest, dest, Imm16::Upper(Imm32(value)).encode()); + as_dsll(dest, dest, 16); + if (value & 0xffff) + as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode()); + } +} + +// This method generates lui, dsll and ori instruction block that can be modified +// by UpdateLoad64Value, either during compilation (eg. Assembler::bind), or +// during execution (eg. jit::PatchJump). 
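+// For Li64 the emitted block is, in effect:
+//   lui   dest, imm[63:48]
+//   ori   dest, dest, imm[47:32]
+//   dsll  dest, dest, 16
+//   ori   dest, dest, imm[31:16]
+//   dsll  dest, dest, 16
+//   ori   dest, dest, imm[15:0]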
+void +MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) +{ + return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value))); +} + +void +MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm, LiFlags flags) +{ + if (Li64 == flags) { + m_buffer.ensureSpace(6 * sizeof(uint32_t)); + as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode()); + as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode()); + as_dsll(dest, dest, 16); + as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode()); + as_dsll(dest, dest, 16); + as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode()); + } else { + m_buffer.ensureSpace(4 * sizeof(uint32_t)); + as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode()); + as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode()); + as_drotr32(dest, dest, 48); + as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode()); + } +} + +void +MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) +{ + as_dsubu(rd, zero, rs); +} + +// Shifts +void +MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) +{ + if (31 < shift.value) + as_dsll32(rd, rt, shift.value); + else + as_dsll(rd, rt, shift.value); +} + +void +MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) +{ + if (31 < shift.value) + as_dsrl32(rd, rt, shift.value); + else + as_dsrl(rd, rt, shift.value); +} + +void +MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) +{ + if (31 < shift.value) + as_dsra32(rd, rt, shift.value); + else + as_dsra(rd, rt, shift.value); +} + +void +MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) +{ + if (31 < shift.value) + as_drotr32(rd, rt, shift.value); + else + as_drotr(rd, rt, shift.value); +} + +void +MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) +{ + uint32_t s = 64 - shift.value; + + if (31 < s) + as_drotr32(rd, rt, s); + else + as_drotr(rd, rt, s); +} + +void +MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) +{ + as_dsllv(rd, rt, shift); +} + +void +MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) +{ + as_dsrlv(rd, rt, shift); +} + +void +MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) +{ + as_dsrav(rd, rt, shift); +} + +void +MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) +{ + as_drotrv(rd, rt, shift); +} + +void +MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) +{ + ma_negu(ScratchRegister, shift); + as_drotrv(rd, rt, ScratchRegister); +} + +void +MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size) +{ + if (pos.value >= 0 && pos.value < 32) { + if (pos.value + size.value > 32) + as_dinsm(rt, rs, pos.value, size.value); + else + as_dins(rt, rs, pos.value, size.value); + } else { + as_dinsu(rt, rs, pos.value, size.value); + } +} + +void +MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size) +{ + if (pos.value >= 0 && pos.value < 32) { + if (size.value > 32) + as_dextm(rt, rs, pos.value, size.value); + else + as_dext(rt, rs, pos.value, size.value); + } else { + as_dextu(rt, rs, pos.value, size.value); + } +} + +void +MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) +{ + ma_dnegu(ScratchRegister, rs); + as_and(rd, ScratchRegister, rs); + as_dclz(rd, rd); + ma_dnegu(SecondScratchReg, rd); + ma_daddu(SecondScratchReg, Imm32(0x3f)); + as_movn(rd, SecondScratchReg, ScratchRegister); +} + +// Arithmetic-based ops. + +// Add. 
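+// Immediates that fit in a signed 16-bit field use daddiu directly; larger
+// values are first materialized into ScratchRegister.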
+void +MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) +{ + if (Imm16::IsInSignedRange(imm.value)) { + as_daddiu(rd, rs, imm.value); + } else { + ma_li(ScratchRegister, imm); + as_daddu(rd, rs, ScratchRegister); + } +} + +void +MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) +{ + as_daddu(rd, rd, rs); +} + +void +MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) +{ + ma_daddu(rd, rd, imm); +} + +template <typename L> +void +MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow) +{ + as_daddu(SecondScratchReg, rs, rt); + as_addu(rd, rs, rt); + ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual); +} + +template void +MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs, + Register rt, Label* overflow); +template void +MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Register rt, + wasm::TrapDesc overflow); + +template <typename L> +void +MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow) +{ + // Check for signed range because of as_daddiu + if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) { + as_daddiu(SecondScratchReg, rs, imm.value); + as_addiu(rd, rs, imm.value); + ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual); + } else { + ma_li(ScratchRegister, imm); + ma_addTestOverflow(rd, rs, ScratchRegister, overflow); + } +} + +template void +MacroAssemblerMIPS64::ma_addTestOverflow<Label*>(Register rd, Register rs, + Imm32 imm, Label* overflow); +template void +MacroAssemblerMIPS64::ma_addTestOverflow<wasm::TrapDesc>(Register rd, Register rs, Imm32 imm, + wasm::TrapDesc overflow); + +// Subtract. +void +MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) +{ + if (Imm16::IsInSignedRange(-imm.value)) { + as_daddiu(rd, rs, -imm.value); + } else { + ma_li(ScratchRegister, imm); + as_dsubu(rd, rs, ScratchRegister); + } +} + +void +MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) +{ + as_dsubu(rd, rd, rs); +} + +void +MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) +{ + ma_dsubu(rd, rd, imm); +} + +void +MacroAssemblerMIPS64::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow) +{ + as_dsubu(SecondScratchReg, rs, rt); + as_subu(rd, rs, rt); + ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual); +} + +void +MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm) +{ + ma_li(ScratchRegister, imm); + as_dmult(rs, ScratchRegister); +} + +// Memory. 
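+// Offsets outside the signed 16-bit range are folded into ScratchRegister and
+// added to the base register; on Loongson cores the indexed gs* loads and
+// stores can avoid the extra daddu.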
+
+void
+MacroAssemblerMIPS64::ma_load(Register dest, Address address,
+                              LoadStoreSize size, LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+
+    if (isLoongson() && ZeroExtend != extension &&
+        !Imm16::IsInSignedRange(address.offset))
+    {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        base = address.base;
+
+        switch (size) {
+          case SizeByte:
+            as_gslbx(dest, base, ScratchRegister, 0);
+            break;
+          case SizeHalfWord:
+            as_gslhx(dest, base, ScratchRegister, 0);
+            break;
+          case SizeWord:
+            as_gslwx(dest, base, ScratchRegister, 0);
+            break;
+          case SizeDouble:
+            as_gsldx(dest, base, ScratchRegister, 0);
+            break;
+          default:
+            MOZ_CRASH("Invalid argument for ma_load");
+        }
+        return;
+    }
+
+    if (!Imm16::IsInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        if (ZeroExtend == extension)
+            as_lbu(dest, base, encodedOffset);
+        else
+            as_lb(dest, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        if (ZeroExtend == extension)
+            as_lhu(dest, base, encodedOffset);
+        else
+            as_lh(dest, base, encodedOffset);
+        break;
+      case SizeWord:
+        if (ZeroExtend == extension)
+            as_lwu(dest, base, encodedOffset);
+        else
+            as_lw(dest, base, encodedOffset);
+        break;
+      case SizeDouble:
+        as_ld(dest, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_load");
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_store(Register data, Address address, LoadStoreSize size,
+                               LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+
+    if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        base = address.base;
+
+        switch (size) {
+          case SizeByte:
+            as_gssbx(data, base, ScratchRegister, 0);
+            break;
+          case SizeHalfWord:
+            as_gsshx(data, base, ScratchRegister, 0);
+            break;
+          case SizeWord:
+            as_gsswx(data, base, ScratchRegister, 0);
+            break;
+          case SizeDouble:
+            as_gssdx(data, base, ScratchRegister, 0);
+            break;
+          default:
+            MOZ_CRASH("Invalid argument for ma_store");
+        }
+        return;
+    }
+
+    if (!Imm16::IsInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_daddu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_sb(data, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        as_sh(data, base, encodedOffset);
+        break;
+      case SizeWord:
+        as_sw(data, base, encodedOffset);
+        break;
+      case SizeDouble:
+        as_sd(data, base, encodedOffset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+}
+
+void
+MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address, Register dest)
+{
+    int32_t shift = Imm32::ShiftOf(address.scale).value;
+    if (shift) {
+        ma_dsll(ScratchRegister, address.index, Imm32(shift));
+        as_daddu(dest, address.base, ScratchRegister);
+    } else {
+        as_daddu(dest, address.base, address.index);
+    }
+}
+
+// Stack push/pop shortcuts; on MIPS64 these transfer full 64-bit words.
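+// For example, ma_push(a0) emits "daddiu sp, sp, -8; sd a0, 0(sp)" and
+// ma_pop(a0) emits "ld a0, 0(sp); daddiu sp, sp, 8".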
+void
+MacroAssemblerMIPS64::ma_pop(Register r)
+{
+    as_ld(r, StackPointer, 0);
+    as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void
+MacroAssemblerMIPS64::ma_push(Register r)
+{
+    if (r == sp) {
+        // Pushing sp requires one more instruction.
+        ma_move(ScratchRegister, sp);
+        r = ScratchRegister;
+    }
+
+    as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(intptr_t));
+    as_sd(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(c != Overflow);
+    if (imm.value == 0) {
+        if (c == Always || c == AboveOrEqual)
+            ma_b(label, jumpKind);
+        else if (c == Below)
+            ; // This condition is always false. No branch required.
+        else
+            branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+    } else {
+        MOZ_ASSERT(lhs != ScratchRegister);
+        ma_li(ScratchRegister, imm);
+        ma_b(lhs, ScratchRegister, label, c, jumpKind);
+    }
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(lhs != ScratchRegister);
+    ma_load(ScratchRegister, addr, SizeDouble);
+    ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    ma_load(SecondScratchReg, addr, SizeDouble);
+    ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c, JumpKind jumpKind)
+{
+    ma_load(SecondScratchReg, addr, SizeDouble);
+    ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void
+MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+{
+    if (label->bound()) {
+        // Generate the long jump for calls because the return address has to
+        // be the address after the reserved block.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+        as_jalr(ScratchRegister);
+        if (delaySlotFill == FillDelaySlot)
+            as_nop();
+        return;
+    }
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch sequence contiguous in the buffer. Six
+    // instructions are written below (including the delay slot).
+    m_buffer.ensureSpace(6 * sizeof(uint32_t));
+
+    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+    writeInst(nextInChain);
+    if (!oom())
+        label->use(bo.getOffset());
+    // Leave space for long jump.
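+    // The nops below pad the block out to the six words reserved above, so
+    // that once the label is bound this can be patched into the long form:
+    // the four-instruction ma_liPatchable, jalr, and the delay slot.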
+    as_nop();
+    as_nop();
+    as_nop();
+    if (delaySlotFill == FillDelaySlot)
+        as_nop();
+}
+
+void
+MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+    if (label->bound()) {
+        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+        if (BOffImm16::IsInRange(offset))
+            jumpKind = ShortJump;
+
+        if (jumpKind == ShortJump) {
+            MOZ_ASSERT(BOffImm16::IsInRange(offset));
+            code.setBOffImm16(BOffImm16(offset));
+            writeInst(code.encode());
+            as_nop();
+            return;
+        }
+
+        if (code.encode() == inst_beq.encode()) {
+            // Handle the long unconditional jump.
+            addLongJump(nextOffset());
+            ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+            as_jr(ScratchRegister);
+            as_nop();
+            return;
+        }
+
+        // Handle a long conditional branch. The target offset is relative to
+        // this branch and points just past the long-jump sequence below.
+        writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
+        // No need for a "nop" here because we can clobber scratch.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
+        as_jr(ScratchRegister);
+        as_nop();
+        return;
+    }
+
+    // Generate an open jump and link it to the label.
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    if (jumpKind == ShortJump) {
+        // Make the whole branch sequence contiguous in the buffer.
+        m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+        // Indicate that this is a short jump, with offset 4.
+        code.setBOffImm16(BOffImm16(4));
+        BufferOffset bo = writeInst(code.encode());
+        writeInst(nextInChain);
+        if (!oom())
+            label->use(bo.getOffset());
+        return;
+    }
+
+    bool conditional = code.encode() != inst_beq.encode();
+
+    // Make the whole branch sequence contiguous in the buffer. Seven
+    // instructions are written below (including the conditional nop).
+    m_buffer.ensureSpace(7 * sizeof(uint32_t));
+
+    BufferOffset bo = writeInst(code.encode());
+    writeInst(nextInChain);
+    if (!oom())
+        label->use(bo.getOffset());
+    // Leave space for potential long jump.
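+    // The nops below keep the reserved region intact; when the label is
+    // bound, the block can be rewritten as an (optionally inverted) branch,
+    // the four-instruction ma_liPatchable, jr, and a delay-slot nop.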
+ as_nop(); + as_nop(); + as_nop(); + as_nop(); + if (conditional) + as_nop(); +} + +void +MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c) +{ + ma_li(ScratchRegister, imm); + ma_cmp_set(rd, rs, ScratchRegister, c); +} + +void +MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c) +{ + ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value))); + ma_cmp_set(rd, rs, ScratchRegister, c); +} + +// fp instructions +void +MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) +{ + ImmWord imm(mozilla::BitwiseCast<uint64_t>(value)); + + ma_li(ScratchRegister, imm); + moveToDouble(ScratchRegister, dest); +} + +void +MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) +{ + as_dmfc1(dest.valueReg(), src); +} + +void +MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) +{ + as_dmtc1(src.valueReg(), dest); +} + +void +MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address) +{ + if (Imm16::IsInSignedRange(address.offset)) { + as_ls(ft, address.base, address.offset); + } else { + MOZ_ASSERT(address.base != ScratchRegister); + ma_li(ScratchRegister, Imm32(address.offset)); + if (isLoongson()) { + as_gslsx(ft, address.base, ScratchRegister, 0); + } else { + as_daddu(ScratchRegister, address.base, ScratchRegister); + as_ls(ft, ScratchRegister, 0); + } + } +} + +void +MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address) +{ + if (Imm16::IsInSignedRange(address.offset)) { + as_ld(ft, address.base, address.offset); + } else { + MOZ_ASSERT(address.base != ScratchRegister); + ma_li(ScratchRegister, Imm32(address.offset)); + if (isLoongson()) { + as_gsldx(ft, address.base, ScratchRegister, 0); + } else { + as_daddu(ScratchRegister, address.base, ScratchRegister); + as_ld(ft, ScratchRegister, 0); + } + } +} + +void +MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address) +{ + if (Imm16::IsInSignedRange(address.offset)) { + as_sd(ft, address.base, address.offset); + } else { + MOZ_ASSERT(address.base != ScratchRegister); + ma_li(ScratchRegister, Imm32(address.offset)); + if (isLoongson()) { + as_gssdx(ft, address.base, ScratchRegister, 0); + } else { + as_daddu(ScratchRegister, address.base, ScratchRegister); + as_sd(ft, ScratchRegister, 0); + } + } +} + +void +MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address) +{ + if (Imm16::IsInSignedRange(address.offset)) { + as_ss(ft, address.base, address.offset); + } else { + MOZ_ASSERT(address.base != ScratchRegister); + ma_li(ScratchRegister, Imm32(address.offset)); + if (isLoongson()) { + as_gsssx(ft, address.base, ScratchRegister, 0); + } else { + as_daddu(ScratchRegister, address.base, ScratchRegister); + as_ss(ft, ScratchRegister, 0); + } + } +} + +void +MacroAssemblerMIPS64::ma_pop(FloatRegister fs) +{ + ma_ld(fs, Address(StackPointer, 0)); + as_daddiu(StackPointer, StackPointer, sizeof(double)); +} + +void +MacroAssemblerMIPS64::ma_push(FloatRegister fs) +{ + as_daddiu(StackPointer, StackPointer, (int32_t)-sizeof(double)); + ma_sd(fs, Address(StackPointer, 0)); +} + +bool +MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) +{ + uint32_t descriptor = MakeFrameDescriptor(asMasm().framePushed(), JitFrame_IonJS, + ExitFrameLayout::Size()); + + asMasm().Push(Imm32(descriptor)); // descriptor_ + asMasm().Push(ImmPtr(fakeReturnAddr)); + + return true; +} + +void +MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) +{ + ma_li(dest, imm); +} + +void +MacroAssemblerMIPS64Compat::move32(Register src, Register dest) 
+{ + ma_move(dest, src); +} + +void +MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) +{ + ma_move(dest, src); +} +void +MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) +{ + ma_li(dest, imm); +} + +void +MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) +{ + ma_li(dest, imm); +} + +void +MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) +{ + movePtr(ImmWord(uintptr_t(imm.value)), dest); +} +void +MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm, Register dest) +{ + append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm)); + ma_liPatchable(dest, ImmWord(-1)); +} + +void +MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address, Register dest) +{ + ma_load(dest, address, SizeByte, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src, Register dest) +{ + ma_load(dest, src, SizeByte, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address, Register dest) +{ + ma_load(dest, address, SizeByte, SignExtend); +} + +void +MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src, Register dest) +{ + ma_load(dest, src, SizeByte, SignExtend); +} + +void +MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address, Register dest) +{ + ma_load(dest, address, SizeHalfWord, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src, Register dest) +{ + ma_load(dest, src, SizeHalfWord, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address, Register dest) +{ + ma_load(dest, address, SizeHalfWord, SignExtend); +} + +void +MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src, Register dest) +{ + ma_load(dest, src, SizeHalfWord, SignExtend); +} + +void +MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest) +{ + ma_load(dest, address, SizeWord); +} + +void +MacroAssemblerMIPS64Compat::load32(const BaseIndex& address, Register dest) +{ + ma_load(dest, address, SizeWord); +} + +void +MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address, Register dest) +{ + movePtr(ImmPtr(address.addr), ScratchRegister); + load32(Address(ScratchRegister, 0), dest); +} + +void +MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address, Register dest) +{ + movePtr(address, ScratchRegister); + load32(Address(ScratchRegister, 0), dest); +} + +void +MacroAssemblerMIPS64Compat::loadPtr(const Address& address, Register dest) +{ + ma_load(dest, address, SizeDouble); +} + +void +MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest) +{ + ma_load(dest, src, SizeDouble); +} + +void +MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address, Register dest) +{ + movePtr(ImmPtr(address.addr), ScratchRegister); + loadPtr(Address(ScratchRegister, 0), dest); +} + +void +MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address, Register dest) +{ + movePtr(address, ScratchRegister); + loadPtr(Address(ScratchRegister, 0), dest); +} + +void +MacroAssemblerMIPS64Compat::loadPrivate(const Address& address, Register dest) +{ + loadPtr(address, dest); + ma_dsll(dest, dest, Imm32(1)); +} + +void +MacroAssemblerMIPS64Compat::loadDouble(const Address& address, FloatRegister dest) +{ + ma_ld(dest, address); +} + +void +MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest) +{ + computeScaledAddress(src, SecondScratchReg); + ma_ld(dest, Address(SecondScratchReg, src.offset)); +} + +void 
+MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp, + FloatRegister dest) +{ + computeScaledAddress(src, SecondScratchReg); + + if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) { + as_ldl(temp, SecondScratchReg, src.offset + 7); + as_ldr(temp, SecondScratchReg, src.offset); + } else { + ma_li(ScratchRegister, Imm32(src.offset)); + as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister); + as_ldl(temp, ScratchRegister, 7); + as_ldr(temp, ScratchRegister, 0); + } + + moveToDouble(temp, dest); +} + +void +MacroAssemblerMIPS64Compat::loadFloatAsDouble(const Address& address, FloatRegister dest) +{ + ma_ls(dest, address); + as_cvtds(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::loadFloatAsDouble(const BaseIndex& src, FloatRegister dest) +{ + loadFloat32(src, dest); + as_cvtds(dest, dest); +} + +void +MacroAssemblerMIPS64Compat::loadFloat32(const Address& address, FloatRegister dest) +{ + ma_ls(dest, address); +} + +void +MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest) +{ + computeScaledAddress(src, SecondScratchReg); + ma_ls(dest, Address(SecondScratchReg, src.offset)); +} + +void +MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp, + FloatRegister dest) +{ + computeScaledAddress(src, SecondScratchReg); + + if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) { + as_lwl(temp, SecondScratchReg, src.offset + 3); + as_lwr(temp, SecondScratchReg, src.offset); + } else { + ma_li(ScratchRegister, Imm32(src.offset)); + as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister); + as_lwl(temp, ScratchRegister, 3); + as_lwr(temp, ScratchRegister, 0); + } + + moveToFloat32(temp, dest); +} + +void +MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) +{ + ma_li(SecondScratchReg, imm); + ma_store(SecondScratchReg, address, SizeByte); +} + +void +MacroAssemblerMIPS64Compat::store8(Register src, const Address& address) +{ + ma_store(src, address, SizeByte); +} + +void +MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) +{ + ma_store(imm, dest, SizeByte); +} + +void +MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest) +{ + ma_store(src, dest, SizeByte); +} + +void +MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) +{ + ma_li(SecondScratchReg, imm); + ma_store(SecondScratchReg, address, SizeHalfWord); +} + +void +MacroAssemblerMIPS64Compat::store16(Register src, const Address& address) +{ + ma_store(src, address, SizeHalfWord); +} + +void +MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) +{ + ma_store(imm, dest, SizeHalfWord); +} + +void +MacroAssemblerMIPS64Compat::store16(Register src, const BaseIndex& address) +{ + ma_store(src, address, SizeHalfWord); +} + +void +MacroAssemblerMIPS64Compat::store32(Register src, AbsoluteAddress address) +{ + movePtr(ImmPtr(address.addr), ScratchRegister); + store32(src, Address(ScratchRegister, 0)); +} + +void +MacroAssemblerMIPS64Compat::store32(Register src, const Address& address) +{ + ma_store(src, address, SizeWord); +} + +void +MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) +{ + move32(src, SecondScratchReg); + ma_store(SecondScratchReg, address, SizeWord); +} + +void +MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) +{ + ma_store(imm, dest, SizeWord); +} + +void +MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest) +{ + 
ma_store(src, dest, SizeWord); +} + +template <typename T> +void +MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) +{ + ma_li(SecondScratchReg, imm); + ma_store(SecondScratchReg, address, SizeDouble); +} + +template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm, Address address); +template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmWord imm, BaseIndex address); + +template <typename T> +void +MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) +{ + storePtr(ImmWord(uintptr_t(imm.value)), address); +} + +template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm, Address address); +template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmPtr imm, BaseIndex address); + +template <typename T> +void +MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) +{ + movePtr(imm, SecondScratchReg); + storePtr(SecondScratchReg, address); +} + +template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm, Address address); +template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(ImmGCPtr imm, BaseIndex address); + +void +MacroAssemblerMIPS64Compat::storePtr(Register src, const Address& address) +{ + ma_store(src, address, SizeDouble); +} + +void +MacroAssemblerMIPS64Compat::storePtr(Register src, const BaseIndex& address) +{ + ma_store(src, address, SizeDouble); +} + +void +MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) +{ + movePtr(ImmPtr(dest.addr), ScratchRegister); + storePtr(src, Address(ScratchRegister, 0)); +} + +void +MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp, + const BaseIndex& dest) +{ + computeScaledAddress(dest, SecondScratchReg); + moveFromFloat32(src, temp); + + if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) { + as_swl(temp, SecondScratchReg, dest.offset + 3); + as_swr(temp, SecondScratchReg, dest.offset); + } else { + ma_li(ScratchRegister, Imm32(dest.offset)); + as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister); + as_swl(temp, ScratchRegister, 3); + as_swr(temp, ScratchRegister, 0); + } +} + +void +MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp, + const BaseIndex& dest) +{ + computeScaledAddress(dest, SecondScratchReg); + moveFromDouble(src, temp); + + if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) { + as_sdl(temp, SecondScratchReg, dest.offset + 7); + as_sdr(temp, SecondScratchReg, dest.offset); + } else { + ma_li(ScratchRegister, Imm32(dest.offset)); + as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister); + as_sdl(temp, ScratchRegister, 7); + as_sdr(temp, ScratchRegister, 0); + } +} + +// Note: this function clobbers the input register. +void +MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) +{ + MOZ_ASSERT(input != ScratchDoubleReg); + Label positive, done; + + // <= 0 or NaN --> 0 + zeroDouble(ScratchDoubleReg); + branchDouble(DoubleGreaterThan, input, ScratchDoubleReg, &positive); + { + move32(Imm32(0), output); + jump(&done); + } + + bind(&positive); + + // Add 0.5 and truncate. + loadConstantDouble(0.5, ScratchDoubleReg); + addDouble(ScratchDoubleReg, input); + + Label outOfRange; + + branchTruncateDoubleMaybeModUint32(input, output, &outOfRange); + asMasm().branch32(Assembler::Above, output, Imm32(255), &outOfRange); + { + // Check if we had a tie. 
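+        // If the truncated result converts back to exactly the (already
+        // incremented) input, the original fraction was exactly 0.5.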
+ convertInt32ToDouble(output, ScratchDoubleReg); + branchDouble(DoubleNotEqual, input, ScratchDoubleReg, &done); + + // It was a tie. Mask out the ones bit to get an even value. + // See also js_TypedArray_uint8_clamp_double. + and32(Imm32(~1), output); + jump(&done); + } + + // > 255 --> 255 + bind(&outOfRange); + { + move32(Imm32(255), output); + } + + bind(&done); +} + +void +MacroAssemblerMIPS64Compat::testNullSet(Condition cond, const ValueOperand& value, Register dest) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + splitTag(value, SecondScratchReg); + ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond); +} + +void +MacroAssemblerMIPS64Compat::testObjectSet(Condition cond, const ValueOperand& value, Register dest) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + splitTag(value, SecondScratchReg); + ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond); +} + +void +MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand& value, Register dest) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + splitTag(value, SecondScratchReg); + ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond); +} + +// unboxing code +void +MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest) +{ + ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest) +{ + loadPtr(Address(src.base, src.offset), dest); + ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest) +{ + computeScaledAddress(src, SecondScratchReg); + loadPtr(Address(SecondScratchReg, src.offset), dest); + ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand, Register dest) +{ + ma_sll(dest, operand.valueReg(), Imm32(0)); +} + +void +MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) +{ + ma_sll(dest, src, Imm32(0)); +} + +void +MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) +{ + load32(Address(src.base, src.offset), dest); +} + +void +MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src, Register dest) +{ + computeScaledAddress(src, SecondScratchReg); + load32(Address(SecondScratchReg, src.offset), dest); +} + +void +MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand, Register dest) +{ + ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32)); +} + +void +MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) +{ + ma_dext(dest, src, Imm32(0), Imm32(32)); +} + +void +MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src, Register dest) +{ + ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src, Register dest) +{ + computeScaledAddress(src, SecondScratchReg); + ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend); +} + +void +MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand, FloatRegister dest) +{ + as_dmtc1(operand.valueReg(), dest); +} + +void +MacroAssemblerMIPS64Compat::unboxDouble(const Address& src, FloatRegister dest) +{ + ma_ld(dest, Address(src.base, src.offset)); +} + +void +MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand, Register dest) +{ + unboxNonDouble(operand, dest); +} + +void 
+MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) +{ + ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxString(const Address& src, Register dest) +{ + unboxNonDouble(src, dest); +} + +void +MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) +{ + ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src, Register dest) +{ + unboxNonDouble(src, dest); +} + +void +MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src, Register dest) +{ + unboxNonDouble(src, dest); +} + +void +MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) +{ + ma_dext(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); +} + +void +MacroAssemblerMIPS64Compat::unboxObject(const Address& src, Register dest) +{ + unboxNonDouble(src, dest); +} + +void +MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src, AnyRegister dest) +{ + if (dest.isFloat()) { + Label notInt32, end; + asMasm().branchTestInt32(Assembler::NotEqual, src, ¬Int32); + convertInt32ToDouble(src.valueReg(), dest.fpu()); + ma_b(&end, ShortJump); + bind(¬Int32); + unboxDouble(src, dest.fpu()); + bind(&end); + } else { + unboxNonDouble(src, dest.gpr()); + } +} + +void +MacroAssemblerMIPS64Compat::unboxPrivate(const ValueOperand& src, Register dest) +{ + ma_dsrl(dest, src.valueReg(), Imm32(1)); +} + +void +MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src, const ValueOperand& dest) +{ + as_dmfc1(dest.valueReg(), src); +} + +void +MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src, + const ValueOperand& dest) +{ + MOZ_ASSERT(src != dest.valueReg()); + boxValue(type, src, dest.valueReg()); +} + +void +MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand, FloatRegister dest) +{ + convertBoolToInt32(operand.valueReg(), ScratchRegister); + convertInt32ToDouble(ScratchRegister, dest); +} + +void +MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand, + FloatRegister dest) +{ + convertInt32ToDouble(operand.valueReg(), dest); +} + +void +MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand, + FloatRegister dest) +{ + + convertBoolToInt32(operand.valueReg(), ScratchRegister); + convertInt32ToFloat32(ScratchRegister, dest); +} + +void +MacroAssemblerMIPS64Compat::int32ValueToFloat32(const ValueOperand& operand, + FloatRegister dest) +{ + convertInt32ToFloat32(operand.valueReg(), dest); +} + +void +MacroAssemblerMIPS64Compat::loadConstantFloat32(float f, FloatRegister dest) +{ + ma_lis(dest, f); +} + +void +MacroAssemblerMIPS64Compat::loadConstantFloat32(wasm::RawF32 f, FloatRegister dest) +{ + ma_lis(dest, f); +} + +void +MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src, FloatRegister dest) +{ + Label notInt32, end; + // If it's an int, convert it to double. + loadPtr(Address(src.base, src.offset), ScratchRegister); + ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT)); + asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, ¬Int32); + loadPtr(Address(src.base, src.offset), SecondScratchReg); + convertInt32ToDouble(SecondScratchReg, dest); + ma_b(&end, ShortJump); + + // Not an int, just load as double. + bind(¬Int32); + ma_ld(dest, src); + bind(&end); +} + +void +MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest) +{ + Label notInt32, end; + + // If it's an int, convert it to double. 
+    computeScaledAddress(addr, SecondScratchReg);
+    // Since we only have one scratch, we need to stomp over it with the tag.
+    loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+    ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+    asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+    computeScaledAddress(addr, SecondScratchReg);
+    loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+    convertInt32ToDouble(SecondScratchReg, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    // Recompute the address, since the scratch register was clobbered above
+    // when the tag was loaded.
+    computeScaledAddress(addr, SecondScratchReg);
+    loadDouble(Address(SecondScratchReg, 0), dest);
+    bind(&end);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantDouble(double dp, FloatRegister dest)
+{
+    ma_lid(dest, dp);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadConstantDouble(wasm::RawF64 d, FloatRegister dest)
+{
+    ImmWord imm(d.bits());
+
+    ma_li(ScratchRegister, imm);
+    moveToDouble(ScratchRegister, dest);
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractObject(const Address& address, Register scratch)
+{
+    loadPtr(Address(address.base, address.offset), scratch);
+    ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+    return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const Address& address, Register scratch)
+{
+    loadPtr(Address(address.base, address.offset), scratch);
+    ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+    return scratch;
+}
+
+Register
+MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address, Register scratch)
+{
+    computeScaledAddress(address, scratch);
+    return extractTag(Address(scratch, address.offset), scratch);
+}
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, Register dest)
+{
+    writeDataRelocation(val);
+    movWithPatch(ImmWord(val.asRawBits()), dest);
+}
+
+void
+MacroAssemblerMIPS64Compat::moveValue(const Value& val, const ValueOperand& dest)
+{
+    moveValue(val, dest.valueReg());
+}
+
+/* There are 3 paths through the backedge jump. They are listed here in the
+ * order in which instructions are executed.
+ * - The short jump is simple:
+ *     b offset            # Jumps directly to target.
+ *     lui at, addr1_hl    # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to the loop header:
+ *     b label1
+ *     lui at, addr1_hl    # In delay slot. We use the value in 'at' later.
+ *   label1:
+ *     ori at, addr1_lh
+ *     drotr32 at, at, 48
+ *     ori at, addr1_ll
+ *     jr at
+ *     lui at, addr2_hl    # In delay slot. Don't care about 'at' here.
+ *
+ * - The long jump to the interrupt loop:
+ *     b label2
+ *     ...
+ *     jr at
+ *   label2:
+ *     lui at, addr2_hl    # In delay slot. Don't care about 'at' here.
+ *     ori at, addr2_lh
+ *     drotr32 at, at, 48
+ *     ori at, addr2_ll
+ *     jr at
+ *     nop                 # In delay slot.
+ *
+ * The backedge is done this way to avoid patching the lui+ori pair while it
+ * is being executed. See also jit::PatchBackedge().
+ */
+CodeOffsetJump
+MacroAssemblerMIPS64Compat::backedgeJump(RepatchLabel* label, Label* documentation)
+{
+    // Only one branch per label.
+    MOZ_ASSERT(!label->used());
+    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+    BufferOffset bo = nextOffset();
+    label->use(bo.getOffset());
+
+    // Backedges are short jumps when bound, but can become long when patched.
+ m_buffer.ensureSpace(16 * sizeof(uint32_t)); + if (label->bound()) { + int32_t offset = label->offset() - bo.getOffset(); + MOZ_ASSERT(BOffImm16::IsInRange(offset)); + as_b(BOffImm16(offset)); + } else { + // Jump to "label1" by default to jump to the loop header. + as_b(BOffImm16(2 * sizeof(uint32_t))); + } + // No need for nop here. We can safely put next instruction in delay slot. + ma_liPatchable(ScratchRegister, ImmWord(dest)); + MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 5 * sizeof(uint32_t)); + as_jr(ScratchRegister); + // No need for nop here. We can safely put next instruction in delay slot. + ma_liPatchable(ScratchRegister, ImmWord(dest)); + as_jr(ScratchRegister); + as_nop(); + MOZ_ASSERT(nextOffset().getOffset() - bo.getOffset() == 12 * sizeof(uint32_t)); + return CodeOffsetJump(bo.getOffset()); +} + +CodeOffsetJump +MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentation) +{ + // Only one branch per label. + MOZ_ASSERT(!label->used()); + uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET; + + BufferOffset bo = nextOffset(); + label->use(bo.getOffset()); + addLongJump(bo); + ma_liPatchable(ScratchRegister, ImmWord(dest)); + as_jr(ScratchRegister); + as_nop(); + return CodeOffsetJump(bo.getOffset()); +} + +///////////////////////////////////////////////////////////////// +// X86/X64-common/ARM/MIPS interface. +///////////////////////////////////////////////////////////////// +void +MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) +{ + storeValue(val, Address(Register::FromCode(dst.base()), dst.disp())); +} + +void +MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const BaseIndex& dest) +{ + computeScaledAddress(dest, SecondScratchReg); + storeValue(val, Address(SecondScratchReg, dest.offset)); +} + +void +MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, BaseIndex dest) +{ + computeScaledAddress(dest, ScratchRegister); + + int32_t offset = dest.offset; + if (!Imm16::IsInSignedRange(offset)) { + ma_li(SecondScratchReg, Imm32(offset)); + as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg); + offset = 0; + } + + storeValue(type, reg, Address(ScratchRegister, offset)); +} + +void +MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, const Address& dest) +{ + storePtr(val.valueReg(), Address(dest.base, dest.offset)); +} + +void +MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address dest) +{ + MOZ_ASSERT(dest.base != SecondScratchReg); + + ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type))); + ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT)); + ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); + storePtr(SecondScratchReg, Address(dest.base, dest.offset)); +} + +void +MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) +{ + if (val.isMarkable()) { + writeDataRelocation(val); + movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg); + } else { + ma_li(SecondScratchReg, ImmWord(val.asRawBits())); + } + storePtr(SecondScratchReg, Address(dest.base, dest.offset)); +} + +void +MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) +{ + computeScaledAddress(dest, ScratchRegister); + + int32_t offset = dest.offset; + if (!Imm16::IsInSignedRange(offset)) { + ma_li(SecondScratchReg, Imm32(offset)); + as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg); + offset = 0; + } + storeValue(val, Address(ScratchRegister, offset)); +} + +void 
+MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr, ValueOperand val)
+{
+    computeScaledAddress(addr, SecondScratchReg);
+    loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val)
+{
+    loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void
+MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+    MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+    if (payload != dest.valueReg())
+        ma_move(dest.valueReg(), payload);
+    ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(ValueOperand val)
+{
+    // Allocate stack space for the Value.
+    asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+    // Store the Value.
+    storeValue(val, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::pushValue(const Address& addr)
+{
+    // Load the value before allocating the stack slot, since addr.base may
+    // be sp.
+    loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+    ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+    storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void
+MacroAssemblerMIPS64Compat::popValue(ValueOperand val)
+{
+    as_ld(val.valueReg(), StackPointer, 0);
+    as_daddiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void
+MacroAssemblerMIPS64Compat::breakpoint()
+{
+    as_break(0);
+}
+
+void
+MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source, FloatRegister dest,
+                                         Label* failure)
+{
+    Label isDouble, done;
+    Register tag = splitTagForTest(source);
+    asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+    asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+
+    unboxInt32(source, ScratchRegister);
+    convertInt32ToDouble(ScratchRegister, dest);
+    jump(&done);
+
+    bind(&isDouble);
+    unboxDouble(source, dest);
+
+    bind(&done);
+}
+
+void
+MacroAssemblerMIPS64Compat::checkStackAlignment()
+{
+#ifdef DEBUG
+    Label aligned;
+    as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+    as_break(BREAK_STACK_UNALIGNED);
+    bind(&aligned);
+#endif
+}
+
+void
+MacroAssembler::alignFrameForICArguments(AfterICSaveLive& aic)
+{
+    if (framePushed() % ABIStackAlignment != 0) {
+        aic.alignmentPadding = ABIStackAlignment - (framePushed() % ABIStackAlignment);
+        reserveStack(aic.alignmentPadding);
+    } else {
+        aic.alignmentPadding = 0;
+    }
+    MOZ_ASSERT(framePushed() % ABIStackAlignment == 0);
+    checkStackAlignment();
+}
+
+void
+MacroAssembler::restoreFrameAlignmentForICArguments(AfterICSaveLive& aic)
+{
+    if (aic.alignmentPadding != 0)
+        freeStack(aic.alignmentPadding);
+}
+
+void
+MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
+{
+    // Reserve space for exception information.
+    int size = (sizeof(ResumeFromException) + ABIStackAlignment) & ~(ABIStackAlignment - 1);
+    asMasm().subPtr(Imm32(size), StackPointer);
+    ma_move(a0, StackPointer); // Use a0 since it is the first function argument.
+
+    // Call the handler.
+    asMasm().setupUnalignedABICall(a1);
+    asMasm().passABIArg(a0);
+    asMasm().callWithABI(handler);
+
+    Label entryFrame;
+    Label catch_;
+    Label finally;
+    Label return_;
+    Label bailout;
+
+    // Already clobbered a0, so use it...
+    load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
+    asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
+                      &entryFrame);
+    asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+    asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+    asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN),
+                      &return_);
+    asMasm().branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+    breakpoint(); // Invalid kind.
+
+    // No exception handler. Load the error value, load the new stack pointer
+    // and return from the entry frame.
+    bind(&entryFrame);
+    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+
+    // We're going to return using the Ion calling convention.
+    ma_pop(ra);
+    as_jr(ra);
+    as_nop();
+
+    // If we found a catch handler, this must be a baseline frame. Restore
+    // state and jump to the catch block.
+    bind(&catch_);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+    jump(a0);
+
+    // If we found a finally block, this must be a baseline frame. Push
+    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+    // exception.
+    bind(&finally);
+    ValueOperand exception = ValueOperand(a1);
+    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+    loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
+    loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
+
+    pushValue(BooleanValue(true));
+    pushValue(exception);
+    jump(a0);
+
+    // Only used in debug mode. Return BaselineFrame->returnValue() to the
+    // caller.
+    bind(&return_);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)), BaselineFrameReg);
+    loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
+    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+              JSReturnOperand);
+    ma_move(StackPointer, BaselineFrameReg);
+    pop(BaselineFrameReg);
+
+    // If profiling is enabled, then update the lastProfilingFrame to refer to
+    // the caller frame before returning.
+    {
+        Label skipProfilingInstrumentation;
+        // Test if the profiler is enabled.
+        AbsoluteAddress addressOfEnabled(GetJitContext()->runtime->spsProfiler().addressOfEnabled());
+        asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+                          &skipProfilingInstrumentation);
+        profilerExitFrame();
+        bind(&skipProfilingInstrumentation);
+    }
+
+    ret();
+
+    // If we are bailing out to baseline to handle an exception, jump to
+    // the bailout tail stub.
+ bind(&bailout); + loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2); + ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK)); + loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1); + jump(a1); +} + +template<typename T> +void +MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, + Register oldval, Register newval, + Register temp, Register valueTemp, + Register offsetTemp, Register maskTemp, + AnyRegister output) +{ + switch (arrayType) { + case Scalar::Int8: + compareExchange8SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint8: + compareExchange8ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Int16: + compareExchange16SignExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint16: + compareExchange16ZeroExtend(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Int32: + compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint32: + // At the moment, the code in MCallOptimize.cpp requires the output + // type to be double for uint32 arrays. See bug 1077305. + MOZ_ASSERT(output.isFloat()); + compareExchange32(mem, oldval, newval, valueTemp, offsetTemp, maskTemp, temp); + convertUInt32ToDouble(temp, output.fpu()); + break; + default: + MOZ_CRASH("Invalid typed array type"); + } +} + +template void +MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem, + Register oldval, Register newval, Register temp, + Register valueTemp, Register offsetTemp, Register maskTemp, + AnyRegister output); +template void +MacroAssemblerMIPS64Compat::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem, + Register oldval, Register newval, Register temp, + Register valueTemp, Register offsetTemp, Register maskTemp, + AnyRegister output); + +template<typename T> +void +MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, + Register value, Register temp, Register valueTemp, + Register offsetTemp, Register maskTemp, + AnyRegister output) +{ + switch (arrayType) { + case Scalar::Int8: + atomicExchange8SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint8: + atomicExchange8ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Int16: + atomicExchange16SignExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint16: + atomicExchange16ZeroExtend(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Int32: + atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, output.gpr()); + break; + case Scalar::Uint32: + // At the moment, the code in MCallOptimize.cpp requires the output + // type to be double for uint32 arrays. See bug 1077305. 
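+        // (A uint32 value can exceed INT32_MAX and so cannot be represented
+        // as an int32 Value, hence the conversion to double below.)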
+ MOZ_ASSERT(output.isFloat()); + atomicExchange32(mem, value, valueTemp, offsetTemp, maskTemp, temp); + convertUInt32ToDouble(temp, output.fpu()); + break; + default: + MOZ_CRASH("Invalid typed array type"); + } +} + +template void +MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const Address& mem, + Register value, Register temp, Register valueTemp, + Register offsetTemp, Register maskTemp, + AnyRegister output); +template void +MacroAssemblerMIPS64Compat::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem, + Register value, Register temp, Register valueTemp, + Register offsetTemp, Register maskTemp, + AnyRegister output); + +CodeOffset +MacroAssemblerMIPS64Compat::toggledJump(Label* label) +{ + CodeOffset ret(nextOffset().getOffset()); + ma_b(label); + return ret; +} + +CodeOffset +MacroAssemblerMIPS64Compat::toggledCall(JitCode* target, bool enabled) +{ + BufferOffset bo = nextOffset(); + CodeOffset offset(bo.getOffset()); + addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE); + ma_liPatchable(ScratchRegister, ImmPtr(target->raw())); + if (enabled) { + as_jalr(ScratchRegister); + as_nop(); + } else { + as_nop(); + as_nop(); + } + MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() == ToggledCallSize(nullptr)); + return offset; +} + +void +MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr, Register scratch) +{ + AbsoluteAddress activation(GetJitContext()->runtime->addressOfProfilingActivation()); + loadPtr(activation, scratch); + storePtr(framePtr, Address(scratch, JitActivation::offsetOfLastProfilingFrame())); + storePtr(ImmPtr(nullptr), Address(scratch, JitActivation::offsetOfLastProfilingCallSite())); +} + +void +MacroAssemblerMIPS64Compat::profilerExitFrame() +{ + branch(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail()); +} + +void +MacroAssembler::subFromStackPtr(Imm32 imm32) +{ + if (imm32.value) + asMasm().subPtr(imm32, StackPointer); +} + +//{{{ check_macroassembler_style +// =============================================================== +// Stack manipulation functions. + +void +MacroAssembler::PushRegsInMask(LiveRegisterSet set) +{ + int32_t diff = set.gprs().size() * sizeof(intptr_t) + + set.fpus().getPushSizeInBytes(); + const int32_t reserved = diff; + + reserveStack(reserved); + for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) { + diff -= sizeof(intptr_t); + storePtr(*iter, Address(StackPointer, diff)); + } + for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) { + diff -= sizeof(double); + storeDouble(*iter, Address(StackPointer, diff)); + } + MOZ_ASSERT(diff == 0); +} + +void +MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore) +{ + int32_t diff = set.gprs().size() * sizeof(intptr_t) + + set.fpus().getPushSizeInBytes(); + const int32_t reserved = diff; + + for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) { + diff -= sizeof(intptr_t); + if (!ignore.has(*iter)) + loadPtr(Address(StackPointer, diff), *iter); + } + for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush()); iter.more(); ++iter) { + diff -= sizeof(double); + if (!ignore.has(*iter)) + loadDouble(Address(StackPointer, diff), *iter); + } + MOZ_ASSERT(diff == 0); + freeStack(reserved); +} + +// =============================================================== +// ABI function calls. 
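+// For reference, handleFailureWithHandlerTail() above uses this interface as:
+//
+//   masm.setupUnalignedABICall(a1);  // cache the old sp in a1, align sp
+//   masm.passABIArg(a0);             // schedule a0 as the first ABI argument
+//   masm.callWithABI(handler);       // Pre reserves stack and spills $ra;
+//                                    // Post restores both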
+
+void
+MacroAssembler::setupUnalignedABICall(Register scratch)
+{
+    setupABICall();
+    dynamicAlignment_ = true;
+
+    ma_move(scratch, StackPointer);
+
+    // Force sp to be aligned.
+    asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+    storePtr(scratch, Address(StackPointer, 0));
+}
+
+void
+MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
+{
+    MOZ_ASSERT(inCall_);
+    uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+    // Reserve a slot for $ra.
+    stackForCall += sizeof(intptr_t);
+
+    if (dynamicAlignment_) {
+        stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+    } else {
+        uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+        stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
+                                             ABIStackAlignment);
+    }
+
+    *stackAdjust = stackForCall;
+    reserveStack(stackForCall);
+
+    // Save $ra because the call is going to clobber it; it is restored in
+    // callWithABIPost(). NOTE: this is needed for calls from SharedIC.
+    // Maybe we can do this differently.
+    storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+    // Position all arguments.
+    {
+        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+        if (!enoughMemory_)
+            return;
+
+        MoveEmitter emitter(*this);
+        emitter.emit(moveResolver_);
+        emitter.finish();
+    }
+
+    assertStackAlignment(ABIStackAlignment);
+}
+
+void
+MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+    // Restore ra value (as stored in callWithABIPre()).
+    loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+    if (dynamicAlignment_) {
+        // Restore sp value from stack (as stored in setupUnalignedABICall()).
+        loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+        // Use adjustFrame instead of freeStack because we already restored sp.
+        adjustFrame(-stackAdjust);
+    } else {
+        freeStack(stackAdjust);
+    }
+
+#ifdef DEBUG
+    MOZ_ASSERT(inCall_);
+    inCall_ = false;
+#endif
+}
+
+void
+MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result)
+{
+    // Move the callee into t9; no instruction between this move and the call
+    // may clobber it. We can't call through 'fun' directly because it may be
+    // one of the IntArg registers clobbered before the call.
+    ma_move(t9, fun);
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(t9);
+    callWithABIPost(stackAdjust, result);
+}
+
+void
+MacroAssembler::callWithABINoProfiler(const Address& fun, MoveOp::Type result)
+{
+    // Load the callee into t9, as above.
+    loadPtr(Address(fun.base, fun.offset), t9);
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(t9);
+    callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Branch functions
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, const Address& address, Register temp,
+                                           Label* label)
+{
+    branchValueIsNurseryObjectImpl(cond, address, temp, label);
+}
+
+void
+MacroAssembler::branchValueIsNurseryObject(Condition cond, ValueOperand value,
+                                           Register temp, Label* label)
+{
+    branchValueIsNurseryObjectImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void
+MacroAssembler::branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp,
+                                               Label* label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+    Label done;
+    branchTestObject(Assembler::NotEqual, value, cond == Assembler::Equal ?
&done : label); + + extractObject(value, SecondScratchReg); + orPtr(Imm32(gc::ChunkMask), SecondScratchReg); + branch32(cond, Address(SecondScratchReg, gc::ChunkLocationOffsetFromLastByte), + Imm32(int32_t(gc::ChunkLocation::Nursery)), label); + + bind(&done); +} + +void +MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs, + const Value& rhs, Label* label) +{ + MOZ_ASSERT(cond == Equal || cond == NotEqual); + ScratchRegisterScope scratch(*this); + moveValue(rhs, scratch); + ma_b(lhs.valueReg(), scratch, label, cond); +} + +// ======================================================================== +// Memory access primitives. +template <typename T> +void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const T& dest, MIRType slotType) +{ + if (valueType == MIRType::Double) { + storeDouble(value.reg().typedReg().fpu(), dest); + return; + } + + // For known integers and booleans, we can just store the unboxed value if + // the slot has the same type. + if ((valueType == MIRType::Int32 || valueType == MIRType::Boolean) && slotType == valueType) { + if (value.constant()) { + Value val = value.value(); + if (valueType == MIRType::Int32) + store32(Imm32(val.toInt32()), dest); + else + store32(Imm32(val.toBoolean() ? 1 : 0), dest); + } else { + store32(value.reg().typedReg().gpr(), dest); + } + return; + } + + if (value.constant()) + storeValue(value.value(), dest); + else + storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(), dest); +} + +template void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const Address& dest, MIRType slotType); +template void +MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, + const BaseIndex& dest, MIRType slotType); + +//}}} check_macroassembler_style diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h new file mode 100644 index 000000000..4cff87236 --- /dev/null +++ b/js/src/jit/mips64/MacroAssembler-mips64.h @@ -0,0 +1,1041 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
+
+#ifndef jit_mips64_MacroAssembler_mips64_h
+#define jit_mips64_MacroAssembler_mips64_h
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/JitFrames.h"
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+enum LiFlags
+{
+    Li64 = 0,
+    Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord
+{
+    explicit ImmShiftedTag(JSValueShiftedTag shtag)
+      : ImmWord((uintptr_t)shtag)
+    { }
+
+    explicit ImmShiftedTag(JSValueType type)
+      : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type))))
+    { }
+};
+
+struct ImmTag : public Imm32
+{
+    ImmTag(JSValueTag mask)
+      : Imm32(int32_t(mask))
+    { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value), "The defaultShift is wrong");
+
+class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
+{
+  public:
+    using MacroAssemblerMIPSShared::ma_b;
+    using MacroAssemblerMIPSShared::ma_li;
+    using MacroAssemblerMIPSShared::ma_ss;
+    using MacroAssemblerMIPSShared::ma_sd;
+    using MacroAssemblerMIPSShared::ma_load;
+    using MacroAssemblerMIPSShared::ma_store;
+    using MacroAssemblerMIPSShared::ma_cmp_set;
+    using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+    void ma_li(Register dest, CodeOffset* label);
+    void ma_li(Register dest, ImmWord imm);
+    void ma_liPatchable(Register dest, ImmPtr imm);
+    void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+    // Negate
+    void ma_dnegu(Register rd, Register rs);
+
+    // Shift operations
+    void ma_dsll(Register rd, Register rt, Imm32 shift);
+    void ma_dsrl(Register rd, Register rt, Imm32 shift);
+    void ma_dsra(Register rd, Register rt, Imm32 shift);
+    void ma_dror(Register rd, Register rt, Imm32 shift);
+    void ma_drol(Register rd, Register rt, Imm32 shift);
+
+    void ma_dsll(Register rd, Register rt, Register shift);
+    void ma_dsrl(Register rd, Register rt, Register shift);
+    void ma_dsra(Register rd, Register rt, Register shift);
+    void ma_dror(Register rd, Register rt, Register shift);
+    void ma_drol(Register rd, Register rt, Register shift);
+
+    void ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size);
+    void ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size);
+
+    void ma_dctz(Register rd, Register rs);
+
+    // load
+    void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+                 LoadStoreExtension extension = SignExtend);
+
+    // store
+    void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+                  LoadStoreExtension extension = SignExtend);
+
+    // arithmetic based ops
+    // add
+    void ma_daddu(Register rd, Register rs, Imm32 imm);
+    void ma_daddu(Register rd, Register rs);
+    void ma_daddu(Register rd, Imm32 imm);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Register rt, L overflow);
+    template <typename L>
+    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow);
+
+    // subtract
+    void ma_dsubu(Register rd, Register rs, Imm32 imm);
+    void ma_dsubu(Register rd, Register rs);
+    void ma_dsubu(Register rd, Imm32 imm);
+    void ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow);
+
+    // multiplies. For now, there are only a few that we care about.
+ void ma_dmult(Register rs, Imm32 imm); + + // stack + void ma_pop(Register r); + void ma_push(Register r); + + void branchWithCode(InstImm code, Label* label, JumpKind jumpKind); + // branches when done from within mips-specific code + void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump); + void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump); + void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump); + void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump); + void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) { + MOZ_ASSERT(rhs != ScratchRegister); + ma_load(ScratchRegister, addr, SizeDouble); + ma_b(ScratchRegister, rhs, l, c, jumpKind); + } + + void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot); + + // fp instructions + void ma_lid(FloatRegister dest, double value); + + void ma_mv(FloatRegister src, ValueOperand dest); + void ma_mv(ValueOperand src, FloatRegister dest); + + void ma_ls(FloatRegister fd, Address address); + void ma_ld(FloatRegister fd, Address address); + void ma_sd(FloatRegister fd, Address address); + void ma_ss(FloatRegister fd, Address address); + + void ma_pop(FloatRegister fs); + void ma_push(FloatRegister fs); + + void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c); + void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c); + + // These functions abstract the access to high part of the double precision + // float register. They are intended to work on both 32 bit and 64 bit + // floating point coprocessor. + void moveToDoubleHi(Register src, FloatRegister dest) { + as_mthc1(src, dest); + } + void moveFromDoubleHi(FloatRegister src, Register dest) { + as_mfhc1(dest, src); + } + + void moveToDouble(Register src, FloatRegister dest) { + as_dmtc1(src, dest); + } + void moveFromDouble(FloatRegister src, Register dest) { + as_dmfc1(dest, src); + } +}; + +class MacroAssembler; + +class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64 +{ + public: + using MacroAssemblerMIPS64::call; + + MacroAssemblerMIPS64Compat() + { } + + void convertBoolToInt32(Register source, Register dest); + void convertInt32ToDouble(Register src, FloatRegister dest); + void convertInt32ToDouble(const Address& src, FloatRegister dest); + void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest); + void convertUInt32ToDouble(Register src, FloatRegister dest); + void convertUInt32ToFloat32(Register src, FloatRegister dest); + void convertDoubleToFloat32(FloatRegister src, FloatRegister dest); + void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true); + void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail, + bool negativeZeroCheck = true); + + void convertFloat32ToDouble(FloatRegister src, FloatRegister dest); + void convertInt32ToFloat32(Register src, FloatRegister dest); + void convertInt32ToFloat32(const Address& src, FloatRegister dest); + + void movq(Register rs, Register rd); + + void computeScaledAddress(const BaseIndex& address, Register dest); + + void computeEffectiveAddress(const Address& address, Register dest) { + ma_daddu(dest, address.base, Imm32(address.offset)); + } + + inline void computeEffectiveAddress(const BaseIndex& address, Register dest); + + void j(Label* dest) { + ma_b(dest); + } + + void mov(Register src, Register dest) { + as_ori(dest, src, 0); + } + void 
mov(ImmWord imm, Register dest) { + ma_li(dest, imm); + } + void mov(ImmPtr imm, Register dest) { + mov(ImmWord(uintptr_t(imm.value)), dest); + } + void mov(Register src, Address dest) { + MOZ_CRASH("NYI-IC"); + } + void mov(Address src, Register dest) { + MOZ_CRASH("NYI-IC"); + } + + void writeDataRelocation(const Value& val) { + if (val.isMarkable()) { + gc::Cell* cell = val.toMarkablePointer(); + if (cell && gc::IsInsideNursery(cell)) + embedsNurseryPointers_ = true; + dataRelocations_.writeUnsigned(currentOffset()); + } + } + + void branch(JitCode* c) { + BufferOffset bo = m_buffer.nextOffset(); + addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE); + ma_liPatchable(ScratchRegister, ImmPtr(c->raw())); + as_jr(ScratchRegister); + as_nop(); + } + void branch(const Register reg) { + as_jr(reg); + as_nop(); + } + void nop() { + as_nop(); + } + void ret() { + ma_pop(ra); + as_jr(ra); + as_nop(); + } + inline void retn(Imm32 n); + void push(Imm32 imm) { + ma_li(ScratchRegister, imm); + ma_push(ScratchRegister); + } + void push(ImmWord imm) { + ma_li(ScratchRegister, imm); + ma_push(ScratchRegister); + } + void push(ImmGCPtr imm) { + ma_li(ScratchRegister, imm); + ma_push(ScratchRegister); + } + void push(const Address& address) { + loadPtr(address, ScratchRegister); + ma_push(ScratchRegister); + } + void push(Register reg) { + ma_push(reg); + } + void push(FloatRegister reg) { + ma_push(reg); + } + void pop(Register reg) { + ma_pop(reg); + } + void pop(FloatRegister reg) { + ma_pop(reg); + } + + // Emit a branch that can be toggled to a non-operation. On MIPS64 we use + // "andi" instruction to toggle the branch. + // See ToggleToJmp(), ToggleToCmp(). + CodeOffset toggledJump(Label* label); + + // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch + // this instruction. + CodeOffset toggledCall(JitCode* target, bool enabled); + + static size_t ToggledCallSize(uint8_t* code) { + // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall + return 6 * sizeof(uint32_t); + } + + CodeOffset pushWithPatch(ImmWord imm) { + CodeOffset offset = movWithPatch(imm, ScratchRegister); + ma_push(ScratchRegister); + return offset; + } + + CodeOffset movWithPatch(ImmWord imm, Register dest) { + CodeOffset offset = CodeOffset(currentOffset()); + ma_liPatchable(dest, imm, Li64); + return offset; + } + CodeOffset movWithPatch(ImmPtr imm, Register dest) { + CodeOffset offset = CodeOffset(currentOffset()); + ma_liPatchable(dest, imm); + return offset; + } + + void jump(Label* label) { + ma_b(label); + } + void jump(Register reg) { + as_jr(reg); + as_nop(); + } + void jump(const Address& address) { + loadPtr(address, ScratchRegister); + as_jr(ScratchRegister); + as_nop(); + } + + void jump(JitCode* code) { + branch(code); + } + + void jump(wasm::TrapDesc target) { + ma_b(target); + } + + void splitTag(Register src, Register dest) { + ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT)); + } + + void splitTag(const ValueOperand& operand, Register dest) { + splitTag(operand.valueReg(), dest); + } + + // Returns the register containing the type tag. 
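On 64-bit builds a JS::Value is one machine word with the type tag in the upper bits and the payload below, so splitTag above is a single logical right shift. A sketch of that layout (the 47-bit shift matches JSVAL_TAG_SHIFT in 64-bit SpiderMonkey; treat the constant as an assumption here):

    #include <stdint.h>

    static const unsigned TagShift = 47; // stand-in for JSVAL_TAG_SHIFT

    // Mirrors ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT)): the logical shift
    // discards the 47-bit payload and leaves the 17-bit tag.
    static uint32_t splitTagSketch(uint64_t boxedWord)
    {
        return static_cast<uint32_t>(boxedWord >> TagShift);
    }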
+ Register splitTagForTest(const ValueOperand& value) { + splitTag(value, SecondScratchReg); + return SecondScratchReg; + } + + // unboxing code + void unboxNonDouble(const ValueOperand& operand, Register dest); + void unboxNonDouble(const Address& src, Register dest); + void unboxNonDouble(const BaseIndex& src, Register dest); + void unboxInt32(const ValueOperand& operand, Register dest); + void unboxInt32(Register src, Register dest); + void unboxInt32(const Address& src, Register dest); + void unboxInt32(const BaseIndex& src, Register dest); + void unboxBoolean(const ValueOperand& operand, Register dest); + void unboxBoolean(Register src, Register dest); + void unboxBoolean(const Address& src, Register dest); + void unboxBoolean(const BaseIndex& src, Register dest); + void unboxDouble(const ValueOperand& operand, FloatRegister dest); + void unboxDouble(Register src, Register dest); + void unboxDouble(const Address& src, FloatRegister dest); + void unboxString(const ValueOperand& operand, Register dest); + void unboxString(Register src, Register dest); + void unboxString(const Address& src, Register dest); + void unboxSymbol(const ValueOperand& src, Register dest); + void unboxSymbol(Register src, Register dest); + void unboxSymbol(const Address& src, Register dest); + void unboxObject(const ValueOperand& src, Register dest); + void unboxObject(Register src, Register dest); + void unboxObject(const Address& src, Register dest); + void unboxObject(const BaseIndex& src, Register dest) { unboxNonDouble(src, dest); } + void unboxValue(const ValueOperand& src, AnyRegister dest); + void unboxPrivate(const ValueOperand& src, Register dest); + + void notBoolean(const ValueOperand& val) { + as_xori(val.valueReg(), val.valueReg(), 1); + } + + // boxing code + void boxDouble(FloatRegister src, const ValueOperand& dest); + void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest); + + // Extended unboxing API. If the payload is already in a register, returns + // that register. Otherwise, provides a move to the given scratch register, + // and returns that. 
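The contract described above matters to callers: the register handed back may be the one the payload already occupied rather than the scratch, so the return value must always be consumed. A hypothetical usage sketch (the offset name is invented for illustration):

    // Register obj = masm.extractObject(val, scratch); // may not write scratch
    // masm.loadPtr(Address(obj, shapeOffset), out);    // always use the result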
+ Register extractObject(const Address& address, Register scratch); + Register extractObject(const ValueOperand& value, Register scratch) { + unboxObject(value, scratch); + return scratch; + } + Register extractInt32(const ValueOperand& value, Register scratch) { + unboxInt32(value, scratch); + return scratch; + } + Register extractBoolean(const ValueOperand& value, Register scratch) { + unboxBoolean(value, scratch); + return scratch; + } + Register extractTag(const Address& address, Register scratch); + Register extractTag(const BaseIndex& address, Register scratch); + Register extractTag(const ValueOperand& value, Register scratch) { + MOZ_ASSERT(scratch != ScratchRegister); + splitTag(value, scratch); + return scratch; + } + + void boolValueToDouble(const ValueOperand& operand, FloatRegister dest); + void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest); + void loadInt32OrDouble(const Address& src, FloatRegister dest); + void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest); + void loadConstantDouble(double dp, FloatRegister dest); + void loadConstantDouble(wasm::RawF64 d, FloatRegister dest); + + void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest); + void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest); + void loadConstantFloat32(float f, FloatRegister dest); + void loadConstantFloat32(wasm::RawF32 f, FloatRegister dest); + + void testNullSet(Condition cond, const ValueOperand& value, Register dest); + + void testObjectSet(Condition cond, const ValueOperand& value, Register dest); + + void testUndefinedSet(Condition cond, const ValueOperand& value, Register dest); + + // higher level tag testing code + Address ToPayload(Address value) { + return value; + } + + void moveValue(const Value& val, Register dest); + + CodeOffsetJump backedgeJump(RepatchLabel* label, Label* documentation = nullptr); + CodeOffsetJump jumpWithPatch(RepatchLabel* label, Label* documentation = nullptr); + + template <typename T> + void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) { + if (dest.isFloat()) + loadInt32OrDouble(address, dest.fpu()); + else if (type == MIRType::Int32) + unboxInt32(address, dest.gpr()); + else if (type == MIRType::Boolean) + unboxBoolean(address, dest.gpr()); + else + unboxNonDouble(address, dest.gpr()); + } + + template <typename T> + void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes) { + switch (nbytes) { + case 8: + unboxNonDouble(value, ScratchRegister); + storePtr(ScratchRegister, address); + return; + case 4: + store32(value.valueReg(), address); + return; + case 1: + store8(value.valueReg(), address); + return; + default: MOZ_CRASH("Bad payload width"); + } + } + + void moveValue(const Value& val, const ValueOperand& dest); + + void moveValue(const ValueOperand& src, const ValueOperand& dest) { + if (src.valueReg() != dest.valueReg()) + ma_move(dest.valueReg(), src.valueReg()); + } + void boxValue(JSValueType type, Register src, Register dest) { + MOZ_ASSERT(src != dest); + + JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type); + ma_li(dest, Imm32(tag)); + ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT)); + ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT)); + } + + void storeValue(ValueOperand val, Operand dst); + void storeValue(ValueOperand val, const BaseIndex& dest); + void storeValue(JSValueType type, Register reg, BaseIndex dest); + void storeValue(ValueOperand val, const Address& dest); + void storeValue(JSValueType type, Register reg, Address dest); + void 
storeValue(const Value& val, Address dest); + void storeValue(const Value& val, BaseIndex dest); + void storeValue(const Address& src, const Address& dest, Register temp) { + loadPtr(src, temp); + storePtr(temp, dest); + } + + void loadValue(Address src, ValueOperand val); + void loadValue(Operand dest, ValueOperand val) { + loadValue(dest.toAddress(), val); + } + void loadValue(const BaseIndex& addr, ValueOperand val); + void tagValue(JSValueType type, Register payload, ValueOperand dest); + + void pushValue(ValueOperand val); + void popValue(ValueOperand val); + void pushValue(const Value& val) { + if (val.isMarkable()) { + writeDataRelocation(val); + movWithPatch(ImmWord(val.asRawBits()), ScratchRegister); + push(ScratchRegister); + } else { + push(ImmWord(val.asRawBits())); + } + } + void pushValue(JSValueType type, Register reg) { + boxValue(type, reg, ScratchRegister); + push(ScratchRegister); + } + void pushValue(const Address& addr); + + void handleFailureWithHandlerTail(void* handler); + + ///////////////////////////////////////////////////////////////// + // Common interface. + ///////////////////////////////////////////////////////////////// + public: + // The following functions are exposed for use in platform-shared code. + + template<typename T> + void compareExchange8SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + compareExchange(1, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void compareExchange8ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + compareExchange(1, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void compareExchange16SignExtend(const T& mem, Register oldval, Register newval, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + compareExchange(2, true, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void compareExchange16ZeroExtend(const T& mem, Register oldval, Register newval, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + compareExchange(2, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void compareExchange32(const T& mem, Register oldval, Register newval, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + compareExchange(4, false, mem, oldval, newval, valueTemp, offsetTemp, maskTemp, output); + } + + template<typename T> + void atomicExchange8SignExtend(const T& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + atomicExchange(1, true, mem, value, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void atomicExchange8ZeroExtend(const T& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + atomicExchange(1, false, mem, value, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void atomicExchange16SignExtend(const T& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + atomicExchange(2, true, mem, value, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void atomicExchange16ZeroExtend(const T& mem, Register value, Register valueTemp, + Register 
offsetTemp, Register maskTemp, Register output) + { + atomicExchange(2, false, mem, value, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T> + void atomicExchange32(const T& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) + { + atomicExchange(4, false, mem, value, valueTemp, offsetTemp, maskTemp, output); + } + + template<typename T, typename S> + void atomicFetchAdd8SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAdd8ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAdd16SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, true, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAdd16ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAdd32(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(4, false, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template <typename T, typename S> + void atomicAdd8(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(1, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicAdd16(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(2, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicAdd32(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(4, AtomicFetchAddOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + + template<typename T, typename S> + void atomicFetchSub8SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchSub8ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchSub16SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register 
output) + { + atomicFetchOp(2, true, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchSub16ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchSub32(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(4, false, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template <typename T, typename S> + void atomicSub8(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(1, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicSub16(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(2, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicSub32(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(4, AtomicFetchSubOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + + template<typename T, typename S> + void atomicFetchAnd8SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAnd8ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAnd16SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, true, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAnd16ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchAnd32(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(4, false, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template <typename T, typename S> + void atomicAnd8(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(1, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicAnd16(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(2, 
AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicAnd32(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(4, AtomicFetchAndOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + + template<typename T, typename S> + void atomicFetchOr8SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchOr8ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchOr16SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, true, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchOr16ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchOr32(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(4, false, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template <typename T, typename S> + void atomicOr8(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(1, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicOr16(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(2, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicOr32(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(4, AtomicFetchOrOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + + template<typename T, typename S> + void atomicFetchXor8SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, true, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchXor8ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(1, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchXor16SignExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, true, AtomicFetchXorOp, 
value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchXor16ZeroExtend(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(2, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template<typename T, typename S> + void atomicFetchXor32(const S& value, const T& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp, Register output) + { + atomicFetchOp(4, false, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp, output); + } + template <typename T, typename S> + void atomicXor8(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(1, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicXor16(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(2, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + template <typename T, typename S> + void atomicXor32(const T& value, const S& mem, Register flagTemp, + Register valueTemp, Register offsetTemp, Register maskTemp) + { + atomicEffectOp(4, AtomicFetchXorOp, value, mem, flagTemp, valueTemp, offsetTemp, maskTemp); + } + + template<typename T> + void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register oldval, Register newval, + Register temp, Register valueTemp, Register offsetTemp, Register maskTemp, + AnyRegister output); + + template<typename T> + void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value, + Register temp, Register valueTemp, Register offsetTemp, Register maskTemp, + AnyRegister output); + + inline void incrementInt32Value(const Address& addr); + + void move32(Imm32 imm, Register dest); + void move32(Register src, Register dest); + + void movePtr(Register src, Register dest); + void movePtr(ImmWord imm, Register dest); + void movePtr(ImmPtr imm, Register dest); + void movePtr(wasm::SymbolicAddress imm, Register dest); + void movePtr(ImmGCPtr imm, Register dest); + + void load8SignExtend(const Address& address, Register dest); + void load8SignExtend(const BaseIndex& src, Register dest); + + void load8ZeroExtend(const Address& address, Register dest); + void load8ZeroExtend(const BaseIndex& src, Register dest); + + void load16SignExtend(const Address& address, Register dest); + void load16SignExtend(const BaseIndex& src, Register dest); + + void load16ZeroExtend(const Address& address, Register dest); + void load16ZeroExtend(const BaseIndex& src, Register dest); + + void load32(const Address& address, Register dest); + void load32(const BaseIndex& address, Register dest); + void load32(AbsoluteAddress address, Register dest); + void load32(wasm::SymbolicAddress address, Register dest); + void load64(const Address& address, Register64 dest) { + loadPtr(address, dest.reg); + } + + void loadPtr(const Address& address, Register dest); + void loadPtr(const BaseIndex& src, Register dest); + void loadPtr(AbsoluteAddress address, Register dest); + void loadPtr(wasm::SymbolicAddress address, Register dest); + + void loadPrivate(const Address& address, Register dest); + + void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x1(const 
BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); } + void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); } + + void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); } + void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); } + + void loadDouble(const Address& addr, FloatRegister dest); + void loadDouble(const BaseIndex& src, FloatRegister dest); + void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest); + + // Load a float value into a register, then expand it to a double. + void loadFloatAsDouble(const Address& addr, FloatRegister dest); + void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest); + + void loadFloat32(const Address& addr, FloatRegister dest); + void loadFloat32(const BaseIndex& src, FloatRegister dest); + void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest); + + void store8(Register src, const Address& address); + void store8(Imm32 imm, const Address& address); + void store8(Register src, const BaseIndex& address); + void store8(Imm32 imm, const BaseIndex& address); + + void store16(Register src, const Address& address); + void store16(Imm32 imm, const Address& address); + void store16(Register src, const BaseIndex& address); + void store16(Imm32 imm, const BaseIndex& address); + + void store32(Register src, AbsoluteAddress address); + void store32(Register src, const Address& address); + void store32(Register src, const BaseIndex& address); + void store32(Imm32 src, const Address& address); + void store32(Imm32 src, const BaseIndex& address); + + // NOTE: This will use second scratch on MIPS64. 
Only ARM needs the + // implementation without second scratch. + void store32_NoSecondScratch(Imm32 src, const Address& address) { + store32(src, address); + } + + void store64(Imm64 imm, Address address) { + storePtr(ImmWord(imm.value), address); + } + + void store64(Register64 src, Address address) { + storePtr(src.reg, address); + } + + template <typename T> void storePtr(ImmWord imm, T address); + template <typename T> void storePtr(ImmPtr imm, T address); + template <typename T> void storePtr(ImmGCPtr imm, T address); + void storePtr(Register src, const Address& address); + void storePtr(Register src, const BaseIndex& address); + void storePtr(Register src, AbsoluteAddress dest); + + void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest); + void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest); + + void moveDouble(FloatRegister src, FloatRegister dest) { + as_movd(dest, src); + } + + void zeroDouble(FloatRegister reg) { + moveToDouble(zero, reg); + } + + void convertInt64ToDouble(Register src, FloatRegister dest); + void convertInt64ToFloat32(Register src, FloatRegister dest); + + void convertUInt64ToDouble(Register src, FloatRegister dest); + void convertUInt64ToFloat32(Register src, FloatRegister dest); + + static bool convertUInt64ToDoubleNeedsTemp(); + void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp); + + void breakpoint(); + + void checkStackAlignment(); + + static void calculateAlignedStackPointer(void** stackPointer); + + // If source is a double, load it into dest. If source is int32, + // convert it to double. Else, branch to failure. + void ensureDouble(const ValueOperand& source, FloatRegister dest, Label* failure); + + void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs, Register dest); + void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs, Register dest); + + void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs, Register dest); + + void cmp64Set(Assembler::Condition cond, Register lhs, Imm32 rhs, Register dest) + { + ma_cmp_set(dest, lhs, rhs, cond); + } + + protected: + bool buildOOLFakeExitFrame(void* fakeReturnAddr); + + public: + CodeOffset labelForPatch() { + return CodeOffset(nextOffset().getOffset()); + } + + void lea(Operand addr, Register dest) { + ma_daddu(dest, addr.baseReg(), Imm32(addr.disp())); + } + + void abiret() { + as_jr(ra); + as_nop(); + } + + BufferOffset ma_BoundsCheck(Register bounded) { + BufferOffset bo = m_buffer.nextOffset(); + ma_liPatchable(bounded, ImmWord(0)); + return bo; + } + + void moveFloat32(FloatRegister src, FloatRegister dest) { + as_movs(dest, src); + } + + void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) { + loadPtr(Address(GlobalReg, globalDataOffset - WasmGlobalRegBias), dest); + } + void loadWasmPinnedRegsFromTls() { + loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg); + loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg); + ma_daddu(GlobalReg, Imm32(WasmGlobalRegBias)); + } + + // Instrumentation for entering and leaving the profiler. 
+ void profilerEnterFrame(Register framePtr, Register scratch); + void profilerExitFrame(); +}; + +typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_MacroAssembler_mips64_h */ diff --git a/js/src/jit/mips64/MoveEmitter-mips64.cpp b/js/src/jit/mips64/MoveEmitter-mips64.cpp new file mode 100644 index 000000000..d208b83a7 --- /dev/null +++ b/js/src/jit/mips64/MoveEmitter-mips64.cpp @@ -0,0 +1,155 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jit/mips64/MoveEmitter-mips64.h" + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +void +MoveEmitterMIPS64::breakCycle(const MoveOperand& from, const MoveOperand& to, + MoveOp::Type type, uint32_t slotId) +{ + // There is some pattern: + // (A -> B) + // (B -> A) + // + // This case handles (A -> B), which we reach first. We save B, then allow + // the original move to continue. + switch (type) { + case MoveOp::FLOAT32: + if (to.isMemory()) { + FloatRegister temp = ScratchFloat32Reg; + masm.loadFloat32(getAdjustedAddress(to), temp); + masm.storeFloat32(temp, cycleSlot(slotId)); + } else { + masm.storeFloat32(to.floatReg(), cycleSlot(slotId)); + } + break; + case MoveOp::DOUBLE: + if (to.isMemory()) { + FloatRegister temp = ScratchDoubleReg; + masm.loadDouble(getAdjustedAddress(to), temp); + masm.storeDouble(temp, cycleSlot(slotId)); + } else { + masm.storeDouble(to.floatReg(), cycleSlot(slotId)); + } + break; + case MoveOp::INT32: + if (to.isMemory()) { + Register temp = tempReg(); + masm.load32(getAdjustedAddress(to), temp); + masm.store32(temp, cycleSlot(0)); + } else { + // Second scratch register should not be moved by MoveEmitter. + MOZ_ASSERT(to.reg() != spilledReg_); + masm.store32(to.reg(), cycleSlot(0)); + } + break; + case MoveOp::GENERAL: + if (to.isMemory()) { + Register temp = tempReg(); + masm.loadPtr(getAdjustedAddress(to), temp); + masm.storePtr(temp, cycleSlot(0)); + } else { + // Second scratch register should not be moved by MoveEmitter. + MOZ_ASSERT(to.reg() != spilledReg_); + masm.storePtr(to.reg(), cycleSlot(0)); + } + break; + default: + MOZ_CRASH("Unexpected move type"); + } +} + +void +MoveEmitterMIPS64::completeCycle(const MoveOperand& from, const MoveOperand& to, + MoveOp::Type type, uint32_t slotId) +{ + // There is some pattern: + // (A -> B) + // (B -> A) + // + // This case handles (B -> A), which we reach last. We emit a move from the + // saved value of B, to A. 
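breakCycle and completeCycle together implement the usual spill-slot resolution of a move cycle. A plain C++ sketch of the two-element case (annotation only, not emitter code):

    // Resolving (A -> B, B -> A): spill B before the first move, fill A after.
    static void resolveTwoCycle(long& a, long& b)
    {
        long cycleSlot = b; // breakCycle: save B, the destination of A -> B
        b = a;              // the scheduled move A -> B proceeds normally
        a = cycleSlot;      // completeCycle: B -> A reads the saved value
    }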
+ switch (type) { + case MoveOp::FLOAT32: + if (to.isMemory()) { + FloatRegister temp = ScratchFloat32Reg; + masm.loadFloat32(cycleSlot(slotId), temp); + masm.storeFloat32(temp, getAdjustedAddress(to)); + } else { + masm.loadFloat32(cycleSlot(slotId), to.floatReg()); + } + break; + case MoveOp::DOUBLE: + if (to.isMemory()) { + FloatRegister temp = ScratchDoubleReg; + masm.loadDouble(cycleSlot(slotId), temp); + masm.storeDouble(temp, getAdjustedAddress(to)); + } else { + masm.loadDouble(cycleSlot(slotId), to.floatReg()); + } + break; + case MoveOp::INT32: + MOZ_ASSERT(slotId == 0); + if (to.isMemory()) { + Register temp = tempReg(); + masm.load32(cycleSlot(0), temp); + masm.store32(temp, getAdjustedAddress(to)); + } else { + // Second scratch register should not be moved by MoveEmitter. + MOZ_ASSERT(to.reg() != spilledReg_); + masm.load32(cycleSlot(0), to.reg()); + } + break; + case MoveOp::GENERAL: + MOZ_ASSERT(slotId == 0); + if (to.isMemory()) { + Register temp = tempReg(); + masm.loadPtr(cycleSlot(0), temp); + masm.storePtr(temp, getAdjustedAddress(to)); + } else { + // Second scratch register should not be moved by MoveEmitter. + MOZ_ASSERT(to.reg() != spilledReg_); + masm.loadPtr(cycleSlot(0), to.reg()); + } + break; + default: + MOZ_CRASH("Unexpected move type"); + } +} + +void +MoveEmitterMIPS64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to) +{ + // Ensure that we can use ScratchDoubleReg in memory move. + MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg); + MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg); + + if (from.isFloatReg()) { + if (to.isFloatReg()) { + masm.moveDouble(from.floatReg(), to.floatReg()); + } else if (to.isGeneralReg()) { + masm.moveFromDouble(from.floatReg(), to.reg()); + } else { + MOZ_ASSERT(to.isMemory()); + masm.storeDouble(from.floatReg(), getAdjustedAddress(to)); + } + } else if (to.isFloatReg()) { + if (from.isMemory()) + masm.loadDouble(getAdjustedAddress(from), to.floatReg()); + else + masm.moveToDouble(from.reg(), to.floatReg()); + } else { + MOZ_ASSERT(from.isMemory()); + MOZ_ASSERT(to.isMemory()); + masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg); + masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to)); + } +} diff --git a/js/src/jit/mips64/MoveEmitter-mips64.h b/js/src/jit/mips64/MoveEmitter-mips64.h new file mode 100644 index 000000000..77e412fb4 --- /dev/null +++ b/js/src/jit/mips64/MoveEmitter-mips64.h @@ -0,0 +1,34 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef jit_mips64_MoveEmitter_mips64_h +#define jit_mips64_MoveEmitter_mips64_h + +#include "jit/mips-shared/MoveEmitter-mips-shared.h" + +namespace js { +namespace jit { + +class MoveEmitterMIPS64 : public MoveEmitterMIPSShared +{ + void emitDoubleMove(const MoveOperand& from, const MoveOperand& to); + void breakCycle(const MoveOperand& from, const MoveOperand& to, + MoveOp::Type type, uint32_t slot); + void completeCycle(const MoveOperand& from, const MoveOperand& to, + MoveOp::Type type, uint32_t slot); + + public: + MoveEmitterMIPS64(MacroAssembler& masm) + : MoveEmitterMIPSShared(masm) + { } +}; + +typedef MoveEmitterMIPS64 MoveEmitter; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_MoveEmitter_mips64_h */ diff --git a/js/src/jit/mips64/SharedIC-mips64.cpp b/js/src/jit/mips64/SharedIC-mips64.cpp new file mode 100644 index 000000000..ee325277f --- /dev/null +++ b/js/src/jit/mips64/SharedIC-mips64.cpp @@ -0,0 +1,191 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "jsiter.h" + +#include "jit/BaselineCompiler.h" +#include "jit/BaselineIC.h" +#include "jit/BaselineJIT.h" +#include "jit/Linker.h" +#include "jit/SharedICHelpers.h" + +#include "jsboolinlines.h" + +using namespace js; +using namespace js::jit; + +namespace js { +namespace jit { + +// ICBinaryArith_Int32 + +bool +ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + // Guard that R0 is an integer and R1 is an integer. + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + masm.branchTestInt32(Assembler::NotEqual, R1, &failure); + + // Add R0 and R1. Don't need to explicitly unbox, just use R2's valueReg. + Register scratchReg = R2.valueReg(); + + Label goodMul, divTest1, divTest2; + switch(op_) { + case JSOP_ADD: + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + masm.ma_addTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); + masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); + break; + case JSOP_SUB: + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + masm.ma_subTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); + masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); + break; + case JSOP_MUL: { + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + masm.ma_mul_branch_overflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); + + masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump); + + // Result is -0 if operands have different signs. + masm.as_xor(t8, ExtractTemp0, ExtractTemp1); + masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump); + + masm.bind(&goodMul); + masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); + break; + } + case JSOP_DIV: + case JSOP_MOD: { + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + // Check for INT_MIN / -1, it results in a double. 
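The pre-check exists because the quotient INT_MIN / -1 is 2^31, one past INT32_MAX, so the int32 fast path cannot represent it. All of the bail-out conditions guarded below, in sketch form (annotation, not stub code):

    #include <stdint.h>

    static bool divFastPathOk(int32_t lhs, int32_t rhs)
    {
        if (rhs == 0)
            return false;                    // division by zero
        if (lhs == INT32_MIN && rhs == -1)
            return false;                    // 2^31 overflows int32
        if (lhs == 0 && rhs < 0)
            return false;                    // 0 / negative would be -0
        return true;
    }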
+ masm.ma_b(ExtractTemp0, Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump); + masm.ma_b(ExtractTemp1, Imm32(-1), &failure, Assembler::Equal, ShortJump); + masm.bind(&divTest1); + + // Check for division by zero + masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::Equal, ShortJump); + + // Check for 0 / X with X < 0 (results in -0). + masm.ma_b(ExtractTemp0, Imm32(0), &divTest2, Assembler::NotEqual, ShortJump); + masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::LessThan, ShortJump); + masm.bind(&divTest2); + + masm.as_div(ExtractTemp0, ExtractTemp1); + + if (op_ == JSOP_DIV) { + // Result is a double if the remainder != 0. + masm.as_mfhi(scratchReg); + masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump); + masm.as_mflo(scratchReg); + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + } else { + Label done; + // If X % Y == 0 and X < 0, the result is -0. + masm.as_mfhi(scratchReg); + masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump); + masm.ma_b(ExtractTemp0, Imm32(0), &failure, Assembler::LessThan, ShortJump); + masm.bind(&done); + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + } + break; + } + case JSOP_BITOR: + masm.as_or(R0.valueReg() , R0.valueReg(), R1.valueReg()); + break; + case JSOP_BITXOR: + masm.as_xor(scratchReg, R0.valueReg(), R1.valueReg()); + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + break; + case JSOP_BITAND: + masm.as_and(R0.valueReg() , R0.valueReg(), R1.valueReg()); + break; + case JSOP_LSH: + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + // MIPS will only use 5 lowest bits in R1 as shift offset. + masm.ma_sll(scratchReg, ExtractTemp0, ExtractTemp1); + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + break; + case JSOP_RSH: + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + masm.ma_sra(scratchReg, ExtractTemp0, ExtractTemp1); + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + break; + case JSOP_URSH: + masm.unboxInt32(R0, ExtractTemp0); + masm.unboxInt32(R1, ExtractTemp1); + masm.ma_srl(scratchReg, ExtractTemp0, ExtractTemp1); + if (allowDouble_) { + Label toUint; + masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump); + + // Move result and box for return. + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + EmitReturnFromIC(masm); + + masm.bind(&toUint); + masm.convertUInt32ToDouble(scratchReg, FloatReg1); + masm.boxDouble(FloatReg1, R0); + } else { + masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump); + // Move result for return. + masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); + } + break; + default: + MOZ_CRASH("Unhandled op for BinaryArith_Int32."); + } + + EmitReturnFromIC(masm); + + // Failure case - jump to next stub + masm.bind(&failure); + EmitStubGuardFailure(masm); + + return true; +} + +bool +ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) +{ + Label failure; + masm.branchTestInt32(Assembler::NotEqual, R0, &failure); + + switch (op) { + case JSOP_BITNOT: + masm.not32(R0.valueReg()); + masm.tagValue(JSVAL_TYPE_INT32, R0.valueReg(), R0); + break; + case JSOP_NEG: + masm.unboxInt32(R0, ExtractTemp0); + // Guard against 0 and MIN_INT, both result in a double. 
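One mask test covers both bail-out cases: x & INT32_MAX is zero exactly when x is 0 (whose negation is -0, a double) or INT32_MIN (whose negation overflows int32). In sketch form (annotation only):

    #include <stdint.h>

    static bool negFastPathOk(int32_t x)
    {
        // Only 0x00000000 and 0x80000000 have no bit set below the sign
        // bit, so the branchTest32 below fails exactly for those inputs.
        return (x & INT32_MAX) != 0;
    }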
+ masm.branchTest32(Assembler::Zero, ExtractTemp0, Imm32(INT32_MAX), &failure); + + masm.neg32(ExtractTemp0); + masm.tagValue(JSVAL_TYPE_INT32, ExtractTemp0, R0); + break; + default: + MOZ_CRASH("Unexpected op"); + return false; + } + + EmitReturnFromIC(masm); + + masm.bind(&failure); + EmitStubGuardFailure(masm); + return true; +} + + +} // namespace jit +} // namespace js diff --git a/js/src/jit/mips64/SharedICRegisters-mips64.h b/js/src/jit/mips64/SharedICRegisters-mips64.h new file mode 100644 index 000000000..401aca1f0 --- /dev/null +++ b/js/src/jit/mips64/SharedICRegisters-mips64.h @@ -0,0 +1,47 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef jit_mips64_SharedICRegisters_mips64_h +#define jit_mips64_SharedICRegisters_mips64_h + +#include "jit/MacroAssembler.h" + +namespace js { +namespace jit { + +static constexpr Register BaselineFrameReg = s5; +static constexpr Register BaselineStackReg = sp; + +// ValueOperands R0, R1, and R2. +// R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value +// should be preserved across calls. +static constexpr ValueOperand R0(v1); +static constexpr ValueOperand R1(s4); +static constexpr ValueOperand R2(a6); + +// ICTailCallReg and ICStubReg +// These use registers that are not preserved across calls. +static constexpr Register ICTailCallReg = ra; +static constexpr Register ICStubReg = a5; + +static constexpr Register ExtractTemp0 = s6; +static constexpr Register ExtractTemp1 = s7; + +// Register used internally by MacroAssemblerMIPS. +static constexpr Register BaselineSecondScratchReg = SecondScratchReg; + +// Note that ICTailCallReg is actually just the link register. +// In MIPS code emission, we do not clobber ICTailCallReg since we keep +// the return address for calls there. + +// FloatReg0 must be equal to ReturnFloatReg. +static constexpr FloatRegister FloatReg0 = f0; +static constexpr FloatRegister FloatReg1 = f2; + +} // namespace jit +} // namespace js + +#endif /* jit_mips64_SharedICRegisters_mips64_h */ diff --git a/js/src/jit/mips64/Simulator-mips64.cpp b/js/src/jit/mips64/Simulator-mips64.cpp new file mode 100644 index 000000000..fcdf41fac --- /dev/null +++ b/js/src/jit/mips64/Simulator-mips64.cpp @@ -0,0 +1,3874 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: */ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "jit/mips64/Simulator-mips64.h" + +#include "mozilla/Casting.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/IntegerPrintfMacros.h" +#include "mozilla/Likely.h" +#include "mozilla/MathAlgorithms.h" + +#include <float.h> + +#include "jit/mips64/Assembler-mips64.h" +#include "threading/LockGuard.h" +#include "vm/Runtime.h" + +#define I8(v) static_cast<int8_t>(v) +#define I16(v) static_cast<int16_t>(v) +#define U16(v) static_cast<uint16_t>(v) +#define I32(v) static_cast<int32_t>(v) +#define U32(v) static_cast<uint32_t>(v) +#define I64(v) static_cast<int64_t>(v) +#define U64(v) static_cast<uint64_t>(v) +#define I128(v) static_cast<__int128_t>(v) +#define U128(v) static_cast<__uint128_t>(v) + +namespace js { +namespace jit { + +static const Instr kCallRedirInstr = op_special | MAX_BREAK_CODE << FunctionBits | ff_break; + +// Utils functions. +static uint32_t +GetFCSRConditionBit(uint32_t cc) +{ + if (cc == 0) + return 23; + return 24 + cc; +} + +// ----------------------------------------------------------------------------- +// MIPS assembly various constants. + +class SimInstruction +{ + public: + enum { + kInstrSize = 4, + // On MIPS PC cannot actually be directly accessed. We behave as if PC was + // always the value of the current instruction being executed. + kPCReadOffset = 0 + }; + + // Get the raw instruction bits. + inline Instr instructionBits() const { + return *reinterpret_cast<const Instr*>(this); + } + + // Set the raw instruction bits to value. + inline void setInstructionBits(Instr value) { + *reinterpret_cast<Instr*>(this) = value; + } + + // Read one particular bit out of the instruction bits. + inline int bit(int nr) const { + return (instructionBits() >> nr) & 1; + } + + // Read a bit field out of the instruction bits. + inline int bits(int hi, int lo) const { + return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1); + } + + // Instruction type. + enum Type { + kRegisterType, + kImmediateType, + kJumpType, + kUnsupported = -1 + }; + + // Get the encoding type of the instruction. + Type instructionType() const; + + + // Accessors for the different named fields used in the MIPS encoding. 
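A worked example of the bits() mask arithmetic the accessors below rely on (annotation; 0x0000102d is the standard MIPS encoding of daddu v0, zero, zero):

    #include <stdint.h>
    #include <assert.h>

    static int bitsSketch(uint32_t insn, int hi, int lo)
    {
        return (insn >> lo) & ((2u << (hi - lo)) - 1); // same math as bits()
    }

    static void decodeExample()
    {
        const uint32_t daddu = 0x0000102d;         // daddu v0, zero, zero
        assert(bitsSketch(daddu, 31, 26) == 0);    // opcode: op_special
        assert(bitsSketch(daddu, 15, 11) == 2);    // rd: v0
        assert(bitsSketch(daddu, 5, 0) == 0x2d);   // function: ff_daddu
    }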
+ inline Opcode opcodeValue() const { + return static_cast<Opcode>(bits(OpcodeShift + OpcodeBits - 1, OpcodeShift)); + } + + inline int rsValue() const { + MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType); + return bits(RSShift + RSBits - 1, RSShift); + } + + inline int rtValue() const { + MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType); + return bits(RTShift + RTBits - 1, RTShift); + } + + inline int rdValue() const { + MOZ_ASSERT(instructionType() == kRegisterType); + return bits(RDShift + RDBits - 1, RDShift); + } + + inline int saValue() const { + MOZ_ASSERT(instructionType() == kRegisterType); + return bits(SAShift + SABits - 1, SAShift); + } + + inline int functionValue() const { + MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType); + return bits(FunctionShift + FunctionBits - 1, FunctionShift); + } + + inline int fdValue() const { + return bits(FDShift + FDBits - 1, FDShift); + } + + inline int fsValue() const { + return bits(FSShift + FSBits - 1, FSShift); + } + + inline int ftValue() const { + return bits(FTShift + FTBits - 1, FTShift); + } + + inline int frValue() const { + return bits(FRShift + FRBits - 1, FRShift); + } + + // Float Compare condition code instruction bits. + inline int fcccValue() const { + return bits(FCccShift + FCccBits - 1, FCccShift); + } + + // Float Branch condition code instruction bits. + inline int fbccValue() const { + return bits(FBccShift + FBccBits - 1, FBccShift); + } + + // Float Branch true/false instruction bit. + inline int fbtrueValue() const { + return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift); + } + + // Return the fields at their original place in the instruction encoding. + inline Opcode opcodeFieldRaw() const { + return static_cast<Opcode>(instructionBits() & OpcodeMask); + } + + inline int rsFieldRaw() const { + MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType); + return instructionBits() & RSMask; + } + + // Same as above function, but safe to call within instructionType(). + inline int rsFieldRawNoAssert() const { + return instructionBits() & RSMask; + } + + inline int rtFieldRaw() const { + MOZ_ASSERT(instructionType() == kRegisterType || instructionType() == kImmediateType); + return instructionBits() & RTMask; + } + + inline int rdFieldRaw() const { + MOZ_ASSERT(instructionType() == kRegisterType); + return instructionBits() & RDMask; + } + + inline int saFieldRaw() const { + MOZ_ASSERT(instructionType() == kRegisterType); + return instructionBits() & SAMask; + } + + inline int functionFieldRaw() const { + return instructionBits() & FunctionMask; + } + + // Get the secondary field according to the opcode. + inline int secondaryValue() const { + Opcode op = opcodeFieldRaw(); + switch (op) { + case op_special: + case op_special2: + return functionValue(); + case op_cop1: + return rsValue(); + case op_regimm: + return rtValue(); + default: + return ff_null; + } + } + + inline int32_t imm16Value() const { + MOZ_ASSERT(instructionType() == kImmediateType); + return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift); + } + + inline int32_t imm26Value() const { + MOZ_ASSERT(instructionType() == kJumpType); + return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift); + } + + // Say if the instruction should not be used in a branch delay slot. + bool isForbiddenInBranchDelay() const; + // Say if the instruction 'links'. e.g. jal, bal. 
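"Links" here means the branch writes a return address into ra: the address two instructions past the branch, since the delay slot executes first. The simulator's kBranchReturnOffset below encodes the same 2 * kInstrSize distance; as a one-line sketch:

    static uint64_t linkAddress(uint64_t branchPC)
    {
        return branchPC + 2 * 4; // branch word + delay slot (2 * kInstrSize)
    }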
+ bool isLinkingInstruction() const; + // Say if the instruction is a break or a trap. + bool isTrap() const; + + private: + + SimInstruction() = delete; + SimInstruction(const SimInstruction& other) = delete; + void operator=(const SimInstruction& other) = delete; +}; + +bool +SimInstruction::isForbiddenInBranchDelay() const +{ + const int op = opcodeFieldRaw(); + switch (op) { + case op_j: + case op_jal: + case op_beq: + case op_bne: + case op_blez: + case op_bgtz: + case op_beql: + case op_bnel: + case op_blezl: + case op_bgtzl: + return true; + case op_regimm: + switch (rtFieldRaw()) { + case rt_bltz: + case rt_bgez: + case rt_bltzal: + case rt_bgezal: + return true; + default: + return false; + }; + break; + case op_special: + switch (functionFieldRaw()) { + case ff_jr: + case ff_jalr: + return true; + default: + return false; + }; + break; + default: + return false; + }; +} + +bool +SimInstruction::isLinkingInstruction() const +{ + const int op = opcodeFieldRaw(); + switch (op) { + case op_jal: + return true; + case op_regimm: + switch (rtFieldRaw()) { + case rt_bgezal: + case rt_bltzal: + return true; + default: + return false; + }; + case op_special: + switch (functionFieldRaw()) { + case ff_jalr: + return true; + default: + return false; + }; + default: + return false; + }; +} + +bool +SimInstruction::isTrap() const +{ + if (opcodeFieldRaw() != op_special) { + return false; + } else { + switch (functionFieldRaw()) { + case ff_break: + case ff_tge: + case ff_tgeu: + case ff_tlt: + case ff_tltu: + case ff_teq: + case ff_tne: + return true; + default: + return false; + }; + } +} + +SimInstruction::Type +SimInstruction::instructionType() const +{ + switch (opcodeFieldRaw()) { + case op_special: + switch (functionFieldRaw()) { + case ff_jr: + case ff_jalr: + case ff_sync: + case ff_break: + case ff_sll: + case ff_dsll: + case ff_dsll32: + case ff_srl: + case ff_dsrl: + case ff_dsrl32: + case ff_sra: + case ff_dsra: + case ff_dsra32: + case ff_sllv: + case ff_dsllv: + case ff_srlv: + case ff_dsrlv: + case ff_srav: + case ff_dsrav: + case ff_mfhi: + case ff_mflo: + case ff_mult: + case ff_dmult: + case ff_multu: + case ff_dmultu: + case ff_div: + case ff_ddiv: + case ff_divu: + case ff_ddivu: + case ff_add: + case ff_dadd: + case ff_addu: + case ff_daddu: + case ff_sub: + case ff_dsub: + case ff_subu: + case ff_dsubu: + case ff_and: + case ff_or: + case ff_xor: + case ff_nor: + case ff_slt: + case ff_sltu: + case ff_tge: + case ff_tgeu: + case ff_tlt: + case ff_tltu: + case ff_teq: + case ff_tne: + case ff_movz: + case ff_movn: + case ff_movci: + return kRegisterType; + default: + return kUnsupported; + }; + break; + case op_special2: + switch (functionFieldRaw()) { + case ff_mul: + case ff_clz: + case ff_dclz: + return kRegisterType; + default: + return kUnsupported; + }; + break; + case op_special3: + switch (functionFieldRaw()) { + case ff_ins: + case ff_dins: + case ff_dinsm: + case ff_dinsu: + case ff_ext: + case ff_dext: + case ff_dextm: + case ff_dextu: + case ff_bshfl: + return kRegisterType; + default: + return kUnsupported; + }; + break; + case op_cop1: // Coprocessor instructions. + switch (rsFieldRawNoAssert()) { + case rs_bc1: // Branch on coprocessor condition. + return kImmediateType; + default: + return kRegisterType; + }; + break; + case op_cop1x: + return kRegisterType; + // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16. 
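+ // All of these share the I-type layout opcode | rs | rt | imm16; the
+ // loads and stores below use rs as the base register and imm16 as a
+ // signed offset.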
+ case op_regimm: + case op_beq: + case op_bne: + case op_blez: + case op_bgtz: + case op_addi: + case op_daddi: + case op_addiu: + case op_daddiu: + case op_slti: + case op_sltiu: + case op_andi: + case op_ori: + case op_xori: + case op_lui: + case op_beql: + case op_bnel: + case op_blezl: + case op_bgtzl: + case op_lb: + case op_lbu: + case op_lh: + case op_lhu: + case op_lw: + case op_lwu: + case op_lwl: + case op_lwr: + case op_ll: + case op_ld: + case op_ldl: + case op_ldr: + case op_sb: + case op_sh: + case op_sw: + case op_swl: + case op_swr: + case op_sc: + case op_sd: + case op_sdl: + case op_sdr: + case op_lwc1: + case op_ldc1: + case op_swc1: + case op_sdc1: + return kImmediateType; + // 26 bits immediate type instructions. e.g.: j imm26. + case op_j: + case op_jal: + return kJumpType; + default: + return kUnsupported; + }; + return kUnsupported; +} + +// C/C++ argument slots size. +const int kCArgSlotCount = 0; +const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t); +const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize; + +class CachePage { + public: + static const int LINE_VALID = 0; + static const int LINE_INVALID = 1; + + static const int kPageShift = 12; + static const int kPageSize = 1 << kPageShift; + static const int kPageMask = kPageSize - 1; + static const int kLineShift = 2; // The cache line is only 4 bytes right now. + static const int kLineLength = 1 << kLineShift; + static const int kLineMask = kLineLength - 1; + + CachePage() { + memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); + } + + char* validityByte(int offset) { + return &validity_map_[offset >> kLineShift]; + } + + char* cachedData(int offset) { + return &data_[offset]; + } + + private: + char data_[kPageSize]; // The cached data. + static const int kValidityMapSize = kPageSize >> kLineShift; + char validity_map_[kValidityMapSize]; // One byte per line. +}; + +// Protects the icache() and redirection() properties of the +// Simulator. +class AutoLockSimulatorCache : public LockGuard<Mutex> +{ + using Base = LockGuard<Mutex>; + + public: + explicit AutoLockSimulatorCache(Simulator* sim) + : Base(sim->cacheLock_) + , sim_(sim) + { + MOZ_ASSERT(sim_->cacheLockHolder_.isNothing()); +#ifdef DEBUG + sim_->cacheLockHolder_ = mozilla::Some(ThisThread::GetId()); +#endif + } + + ~AutoLockSimulatorCache() { + MOZ_ASSERT(sim_->cacheLockHolder_.isSome()); +#ifdef DEBUG + sim_->cacheLockHolder_.reset(); +#endif + } + + private: + Simulator* const sim_; +}; + +bool Simulator::ICacheCheckingEnabled = false; + +int64_t Simulator::StopSimAt = -1; + +Simulator * +Simulator::Create(JSContext* cx) +{ + Simulator* sim = js_new<Simulator>(); + if (!sim) + return nullptr; + + if (!sim->init()) { + js_delete(sim); + return nullptr; + } + + if (getenv("MIPS_SIM_ICACHE_CHECKS")) + Simulator::ICacheCheckingEnabled = true; + + int64_t stopAt; + char* stopAtStr = getenv("MIPS_SIM_STOP_AT"); + if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) { + fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt); + Simulator::StopSimAt = stopAt; + } + + return sim; +} + +void +Simulator::Destroy(Simulator* sim) +{ + js_delete(sim); +} + +// The MipsDebugger class is used by the simulator while debugging simulated +// code. +class MipsDebugger +{ + public: + explicit MipsDebugger(Simulator* sim) : sim_(sim) { } + + void stop(SimInstruction* instr); + void debug(); + // Print all registers with a nice formatting. 
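+ // (each register is shown both as raw hex and as a signed decimal)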
+ void printAllRegs(); + void printAllRegsIncludingFPU(); + + private: + // We set the breakpoint code to 0xfffff to easily recognize it. + static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6; + static const Instr kNopInstr = op_special | ff_sll; + + Simulator* sim_; + + int64_t getRegisterValue(int regnum); + int64_t getFPURegisterValueLong(int regnum); + float getFPURegisterValueFloat(int regnum); + double getFPURegisterValueDouble(int regnum); + bool getValue(const char* desc, int64_t* value); + + // Set or delete a breakpoint. Returns true if successful. + bool setBreakpoint(SimInstruction* breakpc); + bool deleteBreakpoint(SimInstruction* breakpc); + + // Undo and redo all breakpoints. This is needed to bracket disassembly and + // execution to skip past breakpoints when run from the debugger. + void undoBreakpoints(); + void redoBreakpoints(); +}; + +static void +UNSUPPORTED() +{ + printf("Unsupported instruction.\n"); + MOZ_CRASH(); +} + +void +MipsDebugger::stop(SimInstruction* instr) +{ + // Get the stop code. + uint32_t code = instr->bits(25, 6); + // Retrieve the encoded address, which comes just after this stop. + char* msg = *reinterpret_cast<char**>(sim_->get_pc() + + SimInstruction::kInstrSize); + // Update this stop description. + if (!sim_->watchedStops_[code].desc_) + sim_->watchedStops_[code].desc_ = msg; + // Print the stop message and code if it is not the default code. + if (code != kMaxStopCode) + printf("Simulator hit stop %u: %s\n", code, msg); + else + printf("Simulator hit %s\n", msg); + sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize); + debug(); +} + +int64_t +MipsDebugger::getRegisterValue(int regnum) +{ + if (regnum == kPCRegister) + return sim_->get_pc(); + return sim_->getRegister(regnum); +} + +int64_t +MipsDebugger::getFPURegisterValueLong(int regnum) +{ + return sim_->getFpuRegister(regnum); +} + +float +MipsDebugger::getFPURegisterValueFloat(int regnum) +{ + return sim_->getFpuRegisterFloat(regnum); +} + +double +MipsDebugger::getFPURegisterValueDouble(int regnum) +{ + return sim_->getFpuRegisterDouble(regnum); +} + +bool +MipsDebugger::getValue(const char* desc, int64_t* value) +{ + Register reg = Register::FromName(desc); + if (reg != InvalidReg) { + *value = getRegisterValue(reg.code()); + return true; + } + + if (strncmp(desc, "0x", 2) == 0) + return sscanf(desc, "%" PRIu64, reinterpret_cast<uint64_t*>(value)) == 1; + return sscanf(desc, "%" PRIi64, value) == 1; +} + +bool +MipsDebugger::setBreakpoint(SimInstruction* breakpc) +{ + // Check if a breakpoint can be set. If not return without any side-effects. + if (sim_->break_pc_ != nullptr) + return false; + + // Set the breakpoint. + sim_->break_pc_ = breakpc; + sim_->break_instr_ = breakpc->instructionBits(); + // Not setting the breakpoint instruction in the code itself. It will be set + // when the debugger shell continues. 
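+ // Concretely, undoBreakpoints() restores the original instruction while
+ // the debugger shell is active and redoBreakpoints() re-inserts the
+ // break instruction when execution resumes, so the breakpoint is only
+ // armed while simulated code is actually running.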
+ return true; + +} + +bool +MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) +{ + if (sim_->break_pc_ != nullptr) + sim_->break_pc_->setInstructionBits(sim_->break_instr_); + + sim_->break_pc_ = nullptr; + sim_->break_instr_ = 0; + return true; +} + +void +MipsDebugger::undoBreakpoints() +{ + if (sim_->break_pc_) + sim_->break_pc_->setInstructionBits(sim_->break_instr_); +} + +void +MipsDebugger::redoBreakpoints() +{ + if (sim_->break_pc_) + sim_->break_pc_->setInstructionBits(kBreakpointInstr); +} + +void +MipsDebugger::printAllRegs() +{ + int64_t value; + for (uint32_t i = 0; i < Registers::Total; i++) { + value = getRegisterValue(i); + printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i), value, value); + + if (i % 2) + printf("\n"); + } + printf("\n"); + + value = getRegisterValue(Simulator::LO); + printf(" LO: 0x%016" PRIx64 " %20" PRIi64 " ", value, value); + value = getRegisterValue(Simulator::HI); + printf(" HI: 0x%016" PRIx64 " %20" PRIi64 "\n", value, value); + value = getRegisterValue(Simulator::pc); + printf(" pc: 0x%016" PRIx64 "\n", value); +} + +void +MipsDebugger::printAllRegsIncludingFPU() +{ + printAllRegs(); + + printf("\n\n"); + // f0, f1, f2, ... f31. + for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) { + printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n", + FloatRegisters::GetName(i), + getFPURegisterValueLong(i), + getFPURegisterValueFloat(i), + getFPURegisterValueDouble(i)); + } +} + +static char* +ReadLine(const char* prompt) +{ + char* result = nullptr; + char lineBuf[256]; + int offset = 0; + bool keepGoing = true; + fprintf(stdout, "%s", prompt); + fflush(stdout); + while (keepGoing) { + if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) { + // fgets got an error. Just give up. + if (result) + js_delete(result); + return nullptr; + } + int len = strlen(lineBuf); + if (len > 0 && lineBuf[len - 1] == '\n') { + // Since we read a new line we are done reading the line. This + // will exit the loop after copying this buffer into the result. + keepGoing = false; + } + if (!result) { + // Allocate the initial result and make room for the terminating '\0' + result = (char*)js_malloc(len + 1); + if (!result) + return nullptr; + } else { + // Allocate a new result with enough room for the new addition. + int new_len = offset + len + 1; + char* new_result = (char*)js_malloc(new_len); + if (!new_result) + return nullptr; + // Copy the existing input into the new array and set the new + // array as the result. + memcpy(new_result, result, offset * sizeof(char)); + js_free(result); + result = new_result; + } + // Copy the newly read line into the result. 
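+ // offset always indexes the first free byte of result, so the buffer
+ // grows by exactly len bytes per fgets() iteration until a newline
+ // terminates the loop.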
+ memcpy(result + offset, lineBuf, len * sizeof(char)); + offset += len; + } + + MOZ_ASSERT(result); + result[offset] = '\0'; + return result; +} + +static void +DisassembleInstruction(uint64_t pc) +{ + uint8_t* bytes = reinterpret_cast<uint8_t*>(pc); + char hexbytes[256]; + sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2], bytes[3]); + char llvmcmd[1024]; + sprintf(llvmcmd, "bash -c \"echo -n '%p'; echo '%s' | " + "llvm-mc -disassemble -arch=mips64el -mcpu=mips64r2 | " + "grep -v pure_instructions | grep -v .text\"", static_cast<void*>(bytes), hexbytes); + if (system(llvmcmd)) + printf("Cannot disassemble instruction.\n"); +} + +void +MipsDebugger::debug() +{ + intptr_t lastPC = -1; + bool done = false; + +#define COMMAND_SIZE 63 +#define ARG_SIZE 255 + +#define STR(a) #a +#define XSTR(a) STR(a) + + char cmd[COMMAND_SIZE + 1]; + char arg1[ARG_SIZE + 1]; + char arg2[ARG_SIZE + 1]; + char* argv[3] = { cmd, arg1, arg2 }; + + // Make sure to have a proper terminating character if reaching the limit. + cmd[COMMAND_SIZE] = 0; + arg1[ARG_SIZE] = 0; + arg2[ARG_SIZE] = 0; + + // Undo all set breakpoints while running in the debugger shell. This will + // make them invisible to all commands. + undoBreakpoints(); + + while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { + if (lastPC != sim_->get_pc()) { + DisassembleInstruction(sim_->get_pc()); + lastPC = sim_->get_pc(); + } + char* line = ReadLine("sim> "); + if (line == nullptr) { + break; + } else { + char* last_input = sim_->lastDebuggerInput(); + if (strcmp(line, "\n") == 0 && last_input != nullptr) { + line = last_input; + } else { + // Ownership is transferred to sim_; + sim_->setLastDebuggerInput(line); + } + // Use sscanf to parse the individual parts of the command line. At the + // moment no command expects more than two parameters. + int argc = sscanf(line, + "%" XSTR(COMMAND_SIZE) "s " + "%" XSTR(ARG_SIZE) "s " + "%" XSTR(ARG_SIZE) "s", + cmd, arg1, arg2); + if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { + SimInstruction* instr = reinterpret_cast<SimInstruction*>(sim_->get_pc()); + if (!(instr->isTrap()) || + instr->instructionBits() == kCallRedirInstr) { + sim_->instructionDecode( + reinterpret_cast<SimInstruction*>(sim_->get_pc())); + } else { + // Allow si to jump over generated breakpoints. + printf("/!\\ Jumping over generated breakpoint.\n"); + sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize); + } + } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { + // Execute the one instruction we broke at with breakpoints disabled. + sim_->instructionDecode(reinterpret_cast<SimInstruction*>(sim_->get_pc())); + // Leave the debugger shell. 
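+ // (the breakpoints undone on entry are re-armed by redoBreakpoints()
+ // once this loop exits)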
+ done = true; + } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { + if (argc == 2) { + int64_t value; + if (strcmp(arg1, "all") == 0) { + printAllRegs(); + } else if (strcmp(arg1, "allf") == 0) { + printAllRegsIncludingFPU(); + } else { + Register reg = Register::FromName(arg1); + FloatRegisters::Encoding fReg = FloatRegisters::FromName(arg1); + if (reg != InvalidReg) { + value = getRegisterValue(reg.code()); + printf("%s: 0x%016" PRIi64 " %20" PRIi64 " \n", arg1, value, value); + } else if (fReg != FloatRegisters::Invalid) { + printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n", + FloatRegisters::GetName(fReg), + getFPURegisterValueLong(fReg), + getFPURegisterValueFloat(fReg), + getFPURegisterValueDouble(fReg)); + } else { + printf("%s unrecognized\n", arg1); + } + } + } else { + printf("print <register> or print <fpu register> single\n"); + } + } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) { + int64_t* cur = nullptr; + int64_t* end = nullptr; + int next_arg = 1; + + if (strcmp(cmd, "stack") == 0) { + cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp)); + } else { // Command "mem". + int64_t value; + if (!getValue(arg1, &value)) { + printf("%s unrecognized\n", arg1); + continue; + } + cur = reinterpret_cast<int64_t*>(value); + next_arg++; + } + + int64_t words; + if (argc == next_arg) { + words = 10; + } else { + if (!getValue(argv[next_arg], &words)) { + words = 10; + } + } + end = cur + words; + + while (cur < end) { + printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur); + printf("\n"); + cur++; + } + + } else if ((strcmp(cmd, "disasm") == 0) || + (strcmp(cmd, "dpc") == 0) || + (strcmp(cmd, "di") == 0)) { + uint8_t* cur = nullptr; + uint8_t* end = nullptr; + + if (argc == 1) { + cur = reinterpret_cast<uint8_t*>(sim_->get_pc()); + end = cur + (10 * SimInstruction::kInstrSize); + } else if (argc == 2) { + Register reg = Register::FromName(arg1); + if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) { + // The argument is an address or a register name. + int64_t value; + if (getValue(arg1, &value)) { + cur = reinterpret_cast<uint8_t*>(value); + // Disassemble 10 instructions at <arg1>. + end = cur + (10 * SimInstruction::kInstrSize); + } + } else { + // The argument is the number of instructions. + int64_t value; + if (getValue(arg1, &value)) { + cur = reinterpret_cast<uint8_t*>(sim_->get_pc()); + // Disassemble <arg1> instructions. 
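+ // (disassembly starts at the current pc in this case)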
+ end = cur + (value * SimInstruction::kInstrSize); + } + } + } else { + int64_t value1; + int64_t value2; + if (getValue(arg1, &value1) && getValue(arg2, &value2)) { + cur = reinterpret_cast<uint8_t*>(value1); + end = cur + (value2 * SimInstruction::kInstrSize); + } + } + + while (cur < end) { + DisassembleInstruction(uint64_t(cur)); + cur += SimInstruction::kInstrSize; + } + } else if (strcmp(cmd, "gdb") == 0) { + printf("relinquishing control to gdb\n"); + asm("int $3"); + printf("regaining control from gdb\n"); + } else if (strcmp(cmd, "break") == 0) { + if (argc == 2) { + int64_t value; + if (getValue(arg1, &value)) { + if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) + printf("setting breakpoint failed\n"); + } else { + printf("%s unrecognized\n", arg1); + } + } else { + printf("break <address>\n"); + } + } else if (strcmp(cmd, "del") == 0) { + if (!deleteBreakpoint(nullptr)) { + printf("deleting breakpoint failed\n"); + } + } else if (strcmp(cmd, "flags") == 0) { + printf("No flags on MIPS !\n"); + } else if (strcmp(cmd, "stop") == 0) { + int64_t value; + intptr_t stop_pc = sim_->get_pc() - + 2 * SimInstruction::kInstrSize; + SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc); + SimInstruction* msg_address = + reinterpret_cast<SimInstruction*>(stop_pc + + SimInstruction::kInstrSize); + if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { + // Remove the current stop. + if (sim_->isStopInstruction(stop_instr)) { + stop_instr->setInstructionBits(kNopInstr); + msg_address->setInstructionBits(kNopInstr); + } else { + printf("Not at debugger stop.\n"); + } + } else if (argc == 3) { + // Print information about all/the specified breakpoint(s). + if (strcmp(arg1, "info") == 0) { + if (strcmp(arg2, "all") == 0) { + printf("Stop information:\n"); + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->printStopInfo(i); + } + } else if (getValue(arg2, &value)) { + sim_->printStopInfo(value); + } else { + printf("Unrecognized argument.\n"); + } + } else if (strcmp(arg1, "enable") == 0) { + // Enable all/the specified breakpoint(s). + if (strcmp(arg2, "all") == 0) { + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->enableStop(i); + } + } else if (getValue(arg2, &value)) { + sim_->enableStop(value); + } else { + printf("Unrecognized argument.\n"); + } + } else if (strcmp(arg1, "disable") == 0) { + // Disable all/the specified breakpoint(s). + if (strcmp(arg2, "all") == 0) { + for (uint32_t i = kMaxWatchpointCode + 1; + i <= kMaxStopCode; + i++) { + sim_->disableStop(i); + } + } else if (getValue(arg2, &value)) { + sim_->disableStop(value); + } else { + printf("Unrecognized argument.\n"); + } + } + } else { + printf("Wrong usage. 
Use the 'help' command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content (default 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content (default 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a breakpoint at the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they\n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter.\n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print info about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool
+AllOnOnePage(uintptr_t start, int size)
+{
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void
+Simulator::setLastDebuggerInput(char* input)
+{
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage*
+GetCachePageLocked(Simulator::ICacheMap& i_cache, void* page)
+{
+ Simulator::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p)
+ return p->value();
+
+ CachePage* new_page = js_new<CachePage>();
+ if (!i_cache.add(p, page, new_page))
+ return nullptr;
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
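+// Worked example, using the 4 KB pages and 4-byte lines defined above: a
+// flush of [0x1ffe, 0x2006) is widened to line boundaries, [0x1ffc, 0x2008),
+// then split at the page boundary into [0x1ffc, 0x2000) and [0x2000, 0x2008),
+// and FlushICacheLocked below feeds each piece to this function, which
+// invalidates that page's validity bytes.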
+static void +FlushOnePageLocked(Simulator::ICacheMap& i_cache, intptr_t start, int size) +{ + MOZ_ASSERT(size <= CachePage::kPageSize); + MOZ_ASSERT(AllOnOnePage(start, size - 1)); + MOZ_ASSERT((start & CachePage::kLineMask) == 0); + MOZ_ASSERT((size & CachePage::kLineMask) == 0); + void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask)); + int offset = (start & CachePage::kPageMask); + CachePage* cache_page = GetCachePageLocked(i_cache, page); + char* valid_bytemap = cache_page->validityByte(offset); + memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift); +} + +static void +FlushICacheLocked(Simulator::ICacheMap& i_cache, void* start_addr, size_t size) +{ + intptr_t start = reinterpret_cast<intptr_t>(start_addr); + int intra_line = (start & CachePage::kLineMask); + start -= intra_line; + size += intra_line; + size = ((size - 1) | CachePage::kLineMask) + 1; + int offset = (start & CachePage::kPageMask); + while (!AllOnOnePage(start, size - 1)) { + int bytes_to_flush = CachePage::kPageSize - offset; + FlushOnePageLocked(i_cache, start, bytes_to_flush); + start += bytes_to_flush; + size -= bytes_to_flush; + MOZ_ASSERT((start & CachePage::kPageMask) == 0); + offset = 0; + } + if (size != 0) + FlushOnePageLocked(i_cache, start, size); +} + +static void +CheckICacheLocked(Simulator::ICacheMap& i_cache, SimInstruction* instr) +{ + intptr_t address = reinterpret_cast<intptr_t>(instr); + void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask)); + void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask)); + int offset = (address & CachePage::kPageMask); + CachePage* cache_page = GetCachePageLocked(i_cache, page); + char* cache_valid_byte = cache_page->validityByte(offset); + bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID); + char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask); + if (cache_hit) { + // Check that the data in memory matches the contents of the I-cache. + MOZ_ASSERT(memcmp(reinterpret_cast<void*>(instr), + cache_page->cachedData(offset), + SimInstruction::kInstrSize) == 0); + } else { + // Cache miss. Load memory into the cache. + memcpy(cached_line, line, CachePage::kLineLength); + *cache_valid_byte = CachePage::LINE_VALID; + } +} + +HashNumber +Simulator::ICacheHasher::hash(const Lookup& l) +{ + return U32(reinterpret_cast<uintptr_t>(l)) >> 2; +} + +bool +Simulator::ICacheHasher::match(const Key& k, const Lookup& l) +{ + MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0); + MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0); + return k == l; +} + +void +Simulator::FlushICache(void* start_addr, size_t size) +{ + if (Simulator::ICacheCheckingEnabled) { + Simulator* sim = Simulator::Current(); + AutoLockSimulatorCache als(sim); + js::jit::FlushICacheLocked(sim->icache(), start_addr, size); + } +} + +Simulator::Simulator() + : cacheLock_(mutexid::SimulatorCacheLock) +{ + // Set up simulator support first. Some of this information is needed to + // setup the architecture state. + + // Note, allocation and anything that depends on allocated memory is + // deferred until init(), in order to handle OOM properly. + + stack_ = nullptr; + stackLimit_ = 0; + pc_modified_ = false; + icount_ = 0; + break_count_ = 0; + resume_pc_ = 0; + break_pc_ = nullptr; + break_instr_ = 0; + single_stepping_ = false; + single_step_callback_ = nullptr; + single_step_callback_arg_ = nullptr; + + // Set up architecture state. 
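+ // Everything below is infallible member initialization; anything that
+ // can fail is deferred to init().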
+ // All registers are initialized to zero to start with. + for (int i = 0; i < Register::kNumSimuRegisters; i++) + registers_[i] = 0; + for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) + FPUregisters_[i] = 0; + FCSR_ = 0; + + // The ra and pc are initialized to a known bad value that will cause an + // access violation if the simulator ever tries to execute it. + registers_[pc] = bad_ra; + registers_[ra] = bad_ra; + + for (int i = 0; i < kNumExceptions; i++) + exceptions[i] = 0; + + lastDebuggerInput_ = nullptr; + + redirection_ = nullptr; +} + +bool +Simulator::init() +{ + if (!icache_.init()) + return false; + + // Allocate 2MB for the stack. Note that we will only use 1MB, see below. + static const size_t stackSize = 2 * 1024 * 1024; + stack_ = static_cast<char*>(js_malloc(stackSize)); + if (!stack_) + return false; + + // Leave a safety margin of 1MB to prevent overrunning the stack when + // pushing values (total stack size is 2MB). + stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024; + + // The sp is initialized to point to the bottom (high address) of the + // allocated stack area. To be safe in potential stack underflows we leave + // some buffer below. + registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64; + + return true; +} + +// When the generated code calls an external reference we need to catch that in +// the simulator. The external reference will be a function compiled for the +// host architecture. We need to call that function instead of trying to +// execute it with the simulator. We do that by redirecting the external +// reference to a swi (software-interrupt) instruction that is handled by +// the simulator. We write the original destination of the jump just at a known +// offset from the swi instruction so the simulator knows what to call. +class Redirection +{ + friend class Simulator; + + // sim's lock must already be held. 
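+ //
+ // The swi instruction is embedded in the object itself:
+ // addressOfSwiInstruction() hands out &swiInstruction_ as the callable
+ // "code" address, and FromSwiInstruction() inverts that mapping with
+ // offsetof, so for any redirection r:
+ //
+ // r == Redirection::FromSwiInstruction(
+ // reinterpret_cast<SimInstruction*>(r->addressOfSwiInstruction()))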
+ Redirection(void* nativeFunction, ABIFunctionType type, Simulator* sim) + : nativeFunction_(nativeFunction), + swiInstruction_(kCallRedirInstr), + type_(type), + next_(nullptr) + { + next_ = sim->redirection(); + if (Simulator::ICacheCheckingEnabled) + FlushICacheLocked(sim->icache(), addressOfSwiInstruction(), SimInstruction::kInstrSize); + sim->setRedirection(this); + } + + public: + void* addressOfSwiInstruction() { return &swiInstruction_; } + void* nativeFunction() const { return nativeFunction_; } + ABIFunctionType type() const { return type_; } + + static Redirection* Get(void* nativeFunction, ABIFunctionType type) { + Simulator* sim = Simulator::Current(); + + AutoLockSimulatorCache als(sim); + + Redirection* current = sim->redirection(); + for (; current != nullptr; current = current->next_) { + if (current->nativeFunction_ == nativeFunction) { + MOZ_ASSERT(current->type() == type); + return current; + } + } + + Redirection* redir = (Redirection*)js_malloc(sizeof(Redirection)); + if (!redir) { + MOZ_ReportAssertionFailure("[unhandlable oom] Simulator redirection", + __FILE__, __LINE__); + MOZ_CRASH(); + } + new(redir) Redirection(nativeFunction, type, sim); + return redir; + } + + static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) { + uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction); + uint8_t* addrOfRedirection = addrOfSwi - offsetof(Redirection, swiInstruction_); + return reinterpret_cast<Redirection*>(addrOfRedirection); + } + + private: + void* nativeFunction_; + uint32_t swiInstruction_; + ABIFunctionType type_; + Redirection* next_; +}; + +Simulator::~Simulator() +{ + js_free(stack_); + Redirection* r = redirection_; + while (r) { + Redirection* next = r->next_; + js_delete(r); + r = next; + } +} + +/* static */ void* +Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) +{ + Redirection* redirection = Redirection::Get(nativeFunction, type); + return redirection->addressOfSwiInstruction(); +} + +// Get the active Simulator for the current thread. +Simulator* +Simulator::Current() +{ + return TlsPerThreadData.get()->simulator(); +} + +// Sets the register in the architecture state. It will also deal with updating +// Simulator internal state for special registers such as PC. +void +Simulator::setRegister(int reg, int64_t value) +{ + MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters)); + if (reg == pc) + pc_modified_ = true; + + // Zero register always holds 0. + registers_[reg] = (reg == 0) ? 
0 : value; +} + +void +Simulator::setFpuRegister(int fpureg, int64_t value) +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + FPUregisters_[fpureg] = value; +} + +void +Simulator::setFpuRegisterLo(int fpureg, int32_t value) +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value; +} + +void +Simulator::setFpuRegisterHi(int fpureg, int32_t value) +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value; +} + +void +Simulator::setFpuRegisterFloat(int fpureg, float value) +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value; +} + +void +Simulator::setFpuRegisterDouble(int fpureg, double value) +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value; +} + +// Get the register from the architecture state. This function does handle +// the special case of accessing the PC register. +int64_t +Simulator::getRegister(int reg) const +{ + MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters)); + if (reg == 0) + return 0; + return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0); +} + +int64_t +Simulator::getFpuRegister(int fpureg) const +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + return FPUregisters_[fpureg]; +} + +int32_t +Simulator::getFpuRegisterLo(int fpureg) const +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]); +} + +int32_t +Simulator::getFpuRegisterHi(int fpureg) const +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1); +} + +float +Simulator::getFpuRegisterFloat(int fpureg) const +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]); +} + +double +Simulator::getFpuRegisterDouble(int fpureg) const +{ + MOZ_ASSERT((fpureg >= 0) && (fpureg < Simulator::FPURegister::kNumFPURegisters)); + return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]); +} + +void +Simulator::setCallResultDouble(double result) +{ + setFpuRegisterDouble(f0, result); +} + +void +Simulator::setCallResultFloat(float result) +{ + setFpuRegisterFloat(f0, result); +} + +void +Simulator::setCallResult(int64_t res) +{ + setRegister(v0, res); +} + +void +Simulator::setCallResult(__int128_t res) +{ + setRegister(v0, I64(res)); + setRegister(v1, I64(res >> 64)); +} + +// Helper functions for setting and testing the FCSR register's bits. +void +Simulator::setFCSRBit(uint32_t cc, bool value) +{ + if (value) + FCSR_ |= (1 << cc); + else + FCSR_ &= ~(1 << cc); +} + +bool +Simulator::testFCSRBit(uint32_t cc) +{ + return FCSR_ & (1 << cc); +} + +// Sets the rounding error codes in FCSR based on the result of the rounding. +// Returns true if the operation was invalid. 
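+//
+// For example, rounding 1.5 to 1 sets only the inexact flag and returns
+// false, while rounding 2147483648.0 (one past INT_MAX) sets the overflow
+// and invalid-operation flags and returns true.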
+bool +Simulator::setFCSRRoundError(double original, double rounded) +{ + bool ret = false; + + if (!std::isfinite(original) || !std::isfinite(rounded)) { + setFCSRBit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + if (original != rounded) + setFCSRBit(kFCSRInexactFlagBit, true); + + if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) { + setFCSRBit(kFCSRUnderflowFlagBit, true); + ret = true; + } + + if (rounded > INT_MAX || rounded < INT_MIN) { + setFCSRBit(kFCSROverflowFlagBit, true); + // The reference is not really clear but it seems this is required: + setFCSRBit(kFCSRInvalidOpFlagBit, true); + ret = true; + } + + return ret; +} + +// Raw access to the PC register. +void +Simulator::set_pc(int64_t value) +{ + pc_modified_ = true; + registers_[pc] = value; +} + +bool +Simulator::has_bad_pc() const +{ + return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc)); +} + +// Raw access to the PC register without the special adjustment when reading. +int64_t +Simulator::get_pc() const +{ + return registers_[pc]; +} + +// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an +// interrupt is caused. On others it does a funky rotation thing. For now we +// simply disallow unaligned reads, but at some point we may want to move to +// emulating the rotate behaviour. Note that simulator runs have the runtime +// system running directly on the host system and only generated code is +// executed in the simulator. Since the host is typically IA32 we will not +// get the correct MIPS-like behaviour on unaligned accesses. + +uint8_t +Simulator::readBU(uint64_t addr, SimInstruction* instr) +{ + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); + return* ptr; +} + +int8_t +Simulator::readB(uint64_t addr, SimInstruction* instr) +{ + int8_t* ptr = reinterpret_cast<int8_t*>(addr); + return* ptr; +} + +void +Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr) +{ + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr); + *ptr = value; +} + +void +Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr) +{ + int8_t* ptr = reinterpret_cast<int8_t*>(addr); + *ptr = value; +} + +uint16_t +Simulator::readHU(uint64_t addr, SimInstruction* instr) +{ + if ((addr & 1) == 0) { + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); + return *ptr; + } + printf("Unaligned unsigned halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +int16_t +Simulator::readH(uint64_t addr, SimInstruction* instr) +{ + if ((addr & 1) == 0) { + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + return *ptr; + } + printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +void +Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) +{ + if ((addr & 1) == 0) { + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr); + *ptr = value; + return; + } + printf("Unaligned unsigned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +void +Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) +{ + if ((addr & 1) == 0) { + int16_t* ptr = reinterpret_cast<int16_t*>(addr); + *ptr = value; + return; + } + printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +uint32_t +Simulator::readWU(uint64_t addr, SimInstruction* 
instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & 3) == 0) { + uint32_t* ptr = reinterpret_cast<uint32_t*>(addr); + return *ptr; + } + printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +int32_t +Simulator::readW(uint64_t addr, SimInstruction* instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & 3) == 0) { + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + return *ptr; + } + printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +void +Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & 3) == 0) { + uint32_t* ptr = reinterpret_cast<uint32_t*>(addr); + *ptr = value; + return; + } + printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +void +Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & 3) == 0) { + int32_t* ptr = reinterpret_cast<int32_t*>(addr); + *ptr = value; + return; + } + printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +int64_t +Simulator::readDW(uint64_t addr, SimInstruction* instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. + printf("Memory read from bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & kPointerAlignmentMask) == 0) { + int64_t* ptr = reinterpret_cast<int64_t*>(addr); + return* ptr; + } + printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +void +Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr) +{ + if (addr < 0x400) { + // This has to be a NULL-dereference, drop into debugger. 
+ printf("Memory write to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + } + if ((addr & kPointerAlignmentMask) == 0) { + int64_t* ptr = reinterpret_cast<int64_t*>(addr); + *ptr = value; + return; + } + printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +double +Simulator::readD(uint64_t addr, SimInstruction* instr) +{ + if ((addr & kDoubleAlignmentMask) == 0) { + double* ptr = reinterpret_cast<double*>(addr); + return *ptr; + } + printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); + return 0; +} + +void +Simulator::writeD(uint64_t addr, double value, SimInstruction* instr) +{ + if ((addr & kDoubleAlignmentMask) == 0) { + double* ptr = reinterpret_cast<double*>(addr); + *ptr = value; + return; + } + printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", + addr, reinterpret_cast<intptr_t>(instr)); + MOZ_CRASH(); +} + +uintptr_t +Simulator::stackLimit() const +{ + return stackLimit_; +} + +uintptr_t* +Simulator::addressOfStackLimit() +{ + return &stackLimit_; +} + +bool +Simulator::overRecursed(uintptr_t newsp) const +{ + if (newsp == 0) + newsp = getRegister(sp); + return newsp <= stackLimit(); +} + +bool +Simulator::overRecursedWithExtra(uint32_t extra) const +{ + uintptr_t newsp = getRegister(sp) - extra; + return newsp <= stackLimit(); +} + +// Unsupported instructions use format to print an error and stop execution. +void +Simulator::format(SimInstruction* instr, const char* format) +{ + printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n", + reinterpret_cast<intptr_t>(instr), format); + MOZ_CRASH(); +} + +// Note: With the code below we assume that all runtime calls return a 64 bits +// result. If they don't, the v1 result register contains a bogus value, which +// is fine because it is caller-saved. 
+typedef int64_t (*Prototype_General0)(); +typedef int64_t (*Prototype_General1)(int64_t arg0); +typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1); +typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2); +typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3); +typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4); +typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5); +typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, int64_t arg6); +typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3, + int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7); + +typedef double (*Prototype_Double_None)(); +typedef double (*Prototype_Double_Double)(double arg0); +typedef double (*Prototype_Double_Int)(int64_t arg0); +typedef int64_t (*Prototype_Int_Double)(double arg0); +typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1, int64_t arg2); +typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1, int64_t arg2, + int64_t arg3); +typedef float (*Prototype_Float32_Float32)(float arg0); + +typedef double (*Prototype_DoubleInt)(double arg0, int64_t arg1); +typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1); +typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1); +typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1); + +typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2); +typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1, + double arg2, double arg3); + +// Software interrupt instructions are used by the simulator to call into C++. +void +Simulator::softwareInterrupt(SimInstruction* instr) +{ + int32_t func = instr->functionFieldRaw(); + uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1; + + // We first check if we met a call_rt_redirected. + if (instr->instructionBits() == kCallRedirInstr) { +#if !defined(USES_N64_ABI) + MOZ_CRASH("Only N64 ABI supported."); +#else + Redirection* redirection = Redirection::FromSwiInstruction(instr); + int64_t arg0 = getRegister(a0); + int64_t arg1 = getRegister(a1); + int64_t arg2 = getRegister(a2); + int64_t arg3 = getRegister(a3); + int64_t arg4 = getRegister(a4); + int64_t arg5 = getRegister(a5); + + // This is dodgy but it works because the C entry stubs are never moved. + // See comment in codegen-arm.cc and bug 1242173. 
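+ // Save ra now and restore it after the call: the simulated caller's
+ // return address must survive even though the host call may clobber
+ // simulated register state.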
+ int64_t saved_ra = getRegister(ra); + + intptr_t external = reinterpret_cast<intptr_t>(redirection->nativeFunction()); + + bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0; + if (!stack_aligned) { + fprintf(stderr, "Runtime call with unaligned stack!\n"); + MOZ_CRASH(); + } + + if (single_stepping_) + single_step_callback_(single_step_callback_arg_, this, nullptr); + + switch (redirection->type()) { + case Args_General0: { + Prototype_General0 target = reinterpret_cast<Prototype_General0>(external); + int64_t result = target(); + setCallResult(result); + break; + } + case Args_General1: { + Prototype_General1 target = reinterpret_cast<Prototype_General1>(external); + int64_t result = target(arg0); + setCallResult(result); + break; + } + case Args_General2: { + Prototype_General2 target = reinterpret_cast<Prototype_General2>(external); + int64_t result = target(arg0, arg1); + setCallResult(result); + break; + } + case Args_General3: { + Prototype_General3 target = reinterpret_cast<Prototype_General3>(external); + int64_t result = target(arg0, arg1, arg2); + setCallResult(result); + break; + } + case Args_General4: { + Prototype_General4 target = reinterpret_cast<Prototype_General4>(external); + int64_t result = target(arg0, arg1, arg2, arg3); + setCallResult(result); + break; + } + case Args_General5: { + Prototype_General5 target = reinterpret_cast<Prototype_General5>(external); + int64_t result = target(arg0, arg1, arg2, arg3, arg4); + setCallResult(result); + break; + } + case Args_General6: { + Prototype_General6 target = reinterpret_cast<Prototype_General6>(external); + int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); + setCallResult(result); + break; + } + case Args_General7: { + Prototype_General7 target = reinterpret_cast<Prototype_General7>(external); + int64_t arg6 = getRegister(a6); + int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6); + setCallResult(result); + break; + } + case Args_General8: { + Prototype_General8 target = reinterpret_cast<Prototype_General8>(external); + int64_t arg6 = getRegister(a6); + int64_t arg7 = getRegister(a7); + int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + setCallResult(result); + break; + } + case Args_Double_None: { + Prototype_Double_None target = reinterpret_cast<Prototype_Double_None>(external); + double dresult = target(); + setCallResultDouble(dresult); + break; + } + case Args_Int_Double: { + double dval0 = getFpuRegisterDouble(12); + Prototype_Int_Double target = reinterpret_cast<Prototype_Int_Double>(external); + int64_t res = target(dval0); + setRegister(v0, res); + break; + } + case Args_Int_DoubleIntInt: { + double dval = getFpuRegisterDouble(12); + Prototype_Int_DoubleIntInt target = reinterpret_cast<Prototype_Int_DoubleIntInt>(external); + int64_t res = target(dval, arg1, arg2); + setRegister(v0, res); + break; + } + case Args_Int_IntDoubleIntInt: { + double dval = getFpuRegisterDouble(13); + Prototype_Int_IntDoubleIntInt target = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external); + int64_t res = target(arg0, dval, arg2, arg3); + setRegister(v0, res); + break; + } + case Args_Double_Double: { + double dval0 = getFpuRegisterDouble(12); + Prototype_Double_Double target = reinterpret_cast<Prototype_Double_Double>(external); + double dresult = target(dval0); + setCallResultDouble(dresult); + break; + } + case Args_Float32_Float32: { + float fval0; + fval0 = getFpuRegisterFloat(12); + Prototype_Float32_Float32 target = 
reinterpret_cast<Prototype_Float32_Float32>(external); + float fresult = target(fval0); + setCallResultFloat(fresult); + break; + } + case Args_Double_Int: { + Prototype_Double_Int target = reinterpret_cast<Prototype_Double_Int>(external); + double dresult = target(arg0); + setCallResultDouble(dresult); + break; + } + case Args_Double_DoubleInt: { + double dval0 = getFpuRegisterDouble(12); + Prototype_DoubleInt target = reinterpret_cast<Prototype_DoubleInt>(external); + double dresult = target(dval0, arg1); + setCallResultDouble(dresult); + break; + } + case Args_Double_DoubleDouble: { + double dval0 = getFpuRegisterDouble(12); + double dval1 = getFpuRegisterDouble(13); + Prototype_Double_DoubleDouble target = reinterpret_cast<Prototype_Double_DoubleDouble>(external); + double dresult = target(dval0, dval1); + setCallResultDouble(dresult); + break; + } + case Args_Double_IntDouble: { + double dval1 = getFpuRegisterDouble(13); + Prototype_Double_IntDouble target = reinterpret_cast<Prototype_Double_IntDouble>(external); + double dresult = target(arg0, dval1); + setCallResultDouble(dresult); + break; + } + case Args_Int_IntDouble: { + double dval1 = getFpuRegisterDouble(13); + Prototype_Int_IntDouble target = reinterpret_cast<Prototype_Int_IntDouble>(external); + int64_t result = target(arg0, dval1); + setRegister(v0, result); + break; + } + case Args_Double_DoubleDoubleDouble: { + double dval0 = getFpuRegisterDouble(12); + double dval1 = getFpuRegisterDouble(13); + double dval2 = getFpuRegisterDouble(14); + Prototype_Double_DoubleDoubleDouble target = + reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external); + double dresult = target(dval0, dval1, dval2); + setCallResultDouble(dresult); + break; + } + case Args_Double_DoubleDoubleDoubleDouble: { + double dval0 = getFpuRegisterDouble(12); + double dval1 = getFpuRegisterDouble(13); + double dval2 = getFpuRegisterDouble(14); + double dval3 = getFpuRegisterDouble(15); + Prototype_Double_DoubleDoubleDoubleDouble target = + reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(external); + double dresult = target(dval0, dval1, dval2, dval3); + setCallResultDouble(dresult); + break; + } + default: + MOZ_CRASH("call"); + } + + if (single_stepping_) + single_step_callback_(single_step_callback_arg_, this, nullptr); + + setRegister(ra, saved_ra); + set_pc(getRegister(ra)); +#endif + } else if (func == ff_break && code <= kMaxStopCode) { + if (isWatchpoint(code)) { + printWatchpoint(code); + } else { + increaseStopCounter(code); + handleStop(code, instr); + } + } else { + // All remaining break_ codes, and all traps are handled here. + MipsDebugger dbg(this); + dbg.debug(); + } +} + +// Stop helper functions. +bool +Simulator::isWatchpoint(uint32_t code) +{ + return (code <= kMaxWatchpointCode); +} + +void +Simulator::printWatchpoint(uint32_t code) +{ + MipsDebugger dbg(this); + ++break_count_; + printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64 ") ----\n", + code, break_count_, icount_); + dbg.printAllRegs(); // Print registers and continue running. +} + +void +Simulator::handleStop(uint32_t code, SimInstruction* instr) +{ + // Stop if it is enabled, otherwise go on jumping over the stop + // and the message address. 
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
+
+bool
+Simulator::isStopInstruction(SimInstruction* instr)
+{
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = U32(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+bool
+Simulator::isEnabledStop(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void
+Simulator::enableStop(uint32_t code)
+{
+ if (!isEnabledStop(code))
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+void
+Simulator::disableStop(uint32_t code)
+{
+ if (isEnabledStop(code))
+ watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+void
+Simulator::increaseStopCounter(uint32_t code)
+{
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf("Stop counter for code %i has overflowed.\n"
+ "Enabling this code and resetting the counter to 0.\n", code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void
+Simulator::printStopInfo(uint32_t code)
+{
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n",
+ code, code, state, count);
+ }
+ }
+}
+
+void
+Simulator::signalExceptions()
+{
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0)
+ MOZ_CRASH("Error: Exception raised.");
+ }
+}
+
+// Helper function for decodeTypeRegister.
+void
+Simulator::configureTypeRegister(SimInstruction* instr,
+ int64_t& alu_out,
+ __int128& i128hilo,
+ unsigned __int128& u128hilo,
+ int64_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt)
+{
+ // Every local variable declared here needs to be const.
+ // This is to make sure that changed values are sent back to
+ // decodeTypeRegister correctly.
+
+ // Instruction fields.
+ const Opcode op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+ __int128 temp;
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Handled in DecodeTypeImmed, should never come here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
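+ // (cfc1 naming any other control register fails the assert below)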
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegisterLo(fs_reg);
+ break;
+ case rs_dmfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ alu_out = getFpuRegisterHi(fs_reg);
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_dmtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = I32(rt) << sa;
+ break;
+ case ff_dsll:
+ alu_out = rt << sa;
+ break;
+ case ff_dsll32:
+ alu_out = rt << (sa + 32);
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = I32(U32(rt) >> sa);
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+ // is a special case of the SRL instruction, added in MIPS32 Release 2.
+ // RS field is equal to 00001.
+ alu_out = I32((U32(rt) >> sa) | (U32(rt) << (32 - sa)));
+ }
+ break;
+ case ff_dsrl:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> sa;
+ } else {
+ // Logical right-rotate of a double word by a fixed number of bits.
+ // This is a special case of the DSRL instruction, added in MIPS64
+ // Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> sa) | (U64(rt) << (64 - sa));
+ }
+ break;
+ case ff_dsrl32:
+ if (rs_reg == 0) {
+ // Regular logical right shift of a double word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = U64(rt) >> (sa + 32);
+ } else {
+ // Logical right-rotate of a double word by a fixed number of bits.
+ // This is a special case of the DSRL instruction, added in MIPS64
+ // Release 2. RS field is equal to 00001.
+ alu_out = (U64(rt) >> (sa + 32)) | (U64(rt) << (64 - (sa + 32)));
+ }
+ break;
+ case ff_sra:
+ alu_out = I32(rt) >> sa;
+ break;
+ case ff_dsra:
+ alu_out = rt >> sa;
+ break;
+ case ff_dsra32:
+ alu_out = rt >> (sa + 32);
+ break;
+ case ff_sllv:
+ alu_out = I32(rt) << rs;
+ break;
+ case ff_dsllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = I32(U32(rt) >> rs);
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+ // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. SA field is equal to 00001.
+ alu_out = I32((U32(rt) >> rs) | (U32(rt) << (32 - rs)));
+ }
+ break;
+ case ff_dsrlv:
+ if (sa == 0) {
+ // Regular logical right-shift of a double word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = U64(rt) >> rs;
+ } else {
+ // Logical right-rotate of a double word by a variable number of bits.
+ // This is a special case of the DSRLV instruction, added in MIPS64
+ // Release 2. SA field is equal to 00001.
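+ // (a 64-bit rotate right of rt by rs bits)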
+ alu_out = (U64(rt) >> rs) | (U64(rt) << (64 - rs));
+ }
+ break;
+ case ff_srav:
+ alu_out = I32(rt) >> rs;
+ break;
+ case ff_dsrav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i128hilo = I32(rs) * I32(rt);
+ break;
+ case ff_dmult:
+ i128hilo = I128(rs) * I128(rt);
+ break;
+ case ff_multu:
+ u128hilo = U32(rs) * U32(rt);
+ break;
+ case ff_dmultu:
+ u128hilo = U128(rs) * U128(rt);
+ break;
+ case ff_add:
+ alu_out = I32(rs) + I32(rt);
+ if ((alu_out << 32) != (alu_out << 31))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I32(alu_out);
+ break;
+ case ff_dadd:
+ temp = I128(rs) + I128(rt);
+ if ((temp << 64) != (temp << 63))
+ exceptions[kIntegerOverflow] = 1;
+ alu_out = I64(temp);
+ break;
+ case ff_addu:
+ alu_out = I32(U32(rs) + U32(rt));
+ break;
+ case ff_daddu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ alu_out = I32(rs) - I32(rt);
+ if ((alu_out << 32) != (alu_out << 31))
+ exceptions[kIntegerUnderflow] = 1;
+ alu_out = I32(alu_out);
+ break;
+ case ff_dsub:
+ temp = I128(rs) - I128(rt);
+ if ((temp << 64) != (temp << 63))
+ exceptions[kIntegerUnderflow] = 1;
+ alu_out = I64(temp);
+ break;
+ case ff_subu:
+ alu_out = I32(U32(rs) - U32(rt));
+ break;
+ case ff_dsubu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = rs < rt ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = U64(rs) < U64(rt) ? 1 : 0;
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = U64(rs) >= U64(rt);
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = U64(rs) < U64(rt);
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ if (I32(rs) == INT_MIN && I32(rt) == -1) {
+ i128hilo = U32(INT_MIN);
+ } else {
+ uint32_t div = I32(rs) / I32(rt);
+ uint32_t mod = I32(rs) % I32(rt);
+ i128hilo = (I64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddiv:
+ if (rs == INT64_MIN && rt == -1) {
+ i128hilo = U64(INT64_MIN);
+ } else {
+ uint64_t div = rs / rt;
+ uint64_t mod = rs % rt;
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ case ff_divu: {
+ uint32_t div = U32(rs) / U32(rt);
+ uint32_t mod = U32(rs) % U32(rt);
+ i128hilo = (U64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddivu:
+ if (0 == rt) {
+ i128hilo = (I128(Unpredictable) << 64) | I64(Unpredictable);
+ } else {
+ uint64_t div = U64(rs) / U64(rt);
+ uint64_t mod = U64(rs) % U64(rt);
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = I32(I32(rs) * I32(rt)); // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = U32(rs) ? __builtin_clz(U32(rs)) : 32;
+ break;
+ case ff_dclz:
+ alu_out = U64(rs) ? __builtin_clzl(U64(rs)) : 64;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
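+ // e.g. msb = 7, lsb = 4 gives size = 4 and mask = 0xf, so bits
+ // 3..0 of rs replace bits 7..4 of rt while all other bits of rt
+ // are preserved.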
+ uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of insert. + uint16_t lsb = sa; + uint16_t size = msb - lsb + 1; + uint32_t mask = (1 << size) - 1; + if (lsb > msb) + alu_out = Unpredictable; + else + alu_out = (U32(rt) & ~(mask << lsb)) | ((U32(rs) & mask) << lsb); + break; + } + case ff_dins: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of insert. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of insert. + uint16_t lsb = sa; + uint16_t size = msb - lsb + 1; + uint64_t mask = (1ul << size) - 1; + if (lsb > msb) + alu_out = Unpredictable; + else + alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb); + break; + } + case ff_dinsm: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of insert. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of insert. + uint16_t lsb = sa; + uint16_t size = msb - lsb + 33; + uint64_t mask = (1ul << size) - 1; + alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb); + break; + } + case ff_dinsu: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of insert. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of insert. + uint16_t lsb = sa + 32; + uint16_t size = msb - lsb + 33; + uint64_t mask = (1ul << size) - 1; + if (sa > msb) + alu_out = Unpredictable; + else + alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb); + break; + } + case ff_ext: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of extract. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa; + uint16_t size = msb + 1; + uint32_t mask = (1 << size) - 1; + if ((lsb + msb) > 31) + alu_out = Unpredictable; + else + alu_out = (U32(rs) & (mask << lsb)) >> lsb; + break; + } + case ff_dext: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of extract. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa; + uint16_t size = msb + 1; + uint64_t mask = (1ul << size) - 1; + alu_out = (U64(rs) & (mask << lsb)) >> lsb; + break; + } + case ff_dextm: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of extract. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa; + uint16_t size = msb + 33; + uint64_t mask = (1ul << size) - 1; + if ((lsb + msb + 32 + 1) > 64) + alu_out = Unpredictable; + else + alu_out = (U64(rs) & (mask << lsb)) >> lsb; + break; + } + case ff_dextu: { // Mips64r2 instruction. + // Interpret rd field as 5-bit msb of extract. + uint16_t msb = rd_reg; + // Interpret sa field as 5-bit lsb of extract. + uint16_t lsb = sa + 32; + uint16_t size = msb + 1; + uint64_t mask = (1ul << size) - 1; + if ((lsb + msb + 1) > 64) + alu_out = Unpredictable; + else + alu_out = (U64(rs) & (mask << lsb)) >> lsb; + break; + } + case ff_bshfl: { // Mips32r2 instruction. + if (16 == sa) // seb + alu_out = I64(I8(rt)); + else if (24 == sa) // seh + alu_out = I64(I16(rt)); + break; + } + default: + MOZ_CRASH(); + }; + break; + default: + MOZ_CRASH(); + }; +} + +// Handle execution based on instruction types. +void +Simulator::decodeTypeRegister(SimInstruction* instr) +{ + // Instruction fields. 
+ const Opcode op = instr->opcodeFieldRaw(); + const int32_t rs_reg = instr->rsValue(); + const int64_t rs = getRegister(rs_reg); + const int32_t rt_reg = instr->rtValue(); + const int64_t rt = getRegister(rt_reg); + const int32_t rd_reg = instr->rdValue(); + + const int32_t fr_reg = instr->frValue(); + const int32_t fs_reg = instr->fsValue(); + const int32_t ft_reg = instr->ftValue(); + const int32_t fd_reg = instr->fdValue(); + __int128 i128hilo = 0; + unsigned __int128 u128hilo = 0; + + // ALU output. + // It should not be used as is. Instructions using it should always + // initialize it first. + int64_t alu_out = 0x12345678; + + // For break and trap instructions. + bool do_interrupt = false; + + // For jr and jalr. + // Get current pc. + int64_t current_pc = get_pc(); + // Next pc + int64_t next_pc = 0; + int32_t return_addr_reg = 31; + + // Set up the variables if needed before executing the instruction. + configureTypeRegister(instr, + alu_out, + i128hilo, + u128hilo, + next_pc, + return_addr_reg, + do_interrupt); + + // ---------- Raise exceptions triggered. + signalExceptions(); + + // ---------- Execution. + switch (op) { + case op_cop1: + switch (instr->rsFieldRaw()) { + case rs_bc1: // Branch on coprocessor condition. + MOZ_CRASH(); + break; + case rs_cfc1: + setRegister(rt_reg, alu_out); + case rs_mfc1: + setRegister(rt_reg, alu_out); + break; + case rs_dmfc1: + setRegister(rt_reg, alu_out); + break; + case rs_mfhc1: + setRegister(rt_reg, alu_out); + break; + case rs_ctc1: + // At the moment only FCSR is supported. + MOZ_ASSERT(fs_reg == kFCSRRegister); + FCSR_ = registers_[rt_reg]; + break; + case rs_mtc1: + setFpuRegisterLo(fs_reg, registers_[rt_reg]); + break; + case rs_dmtc1: + setFpuRegister(fs_reg, registers_[rt_reg]); + break; + case rs_mthc1: + setFpuRegisterHi(fs_reg, registers_[rt_reg]); + break; + case rs_s: + float f, ft_value, fs_value; + uint32_t cc, fcsr_cc; + int64_t i64; + fs_value = getFpuRegisterFloat(fs_reg); + ft_value = getFpuRegisterFloat(ft_reg); + cc = instr->fcccValue(); + fcsr_cc = GetFCSRConditionBit(cc); + switch (instr->functionFieldRaw()) { + case ff_add_fmt: + setFpuRegisterFloat(fd_reg, fs_value + ft_value); + break; + case ff_sub_fmt: + setFpuRegisterFloat(fd_reg, fs_value - ft_value); + break; + case ff_mul_fmt: + setFpuRegisterFloat(fd_reg, fs_value * ft_value); + break; + case ff_div_fmt: + setFpuRegisterFloat(fd_reg, fs_value / ft_value); + break; + case ff_abs_fmt: + setFpuRegisterFloat(fd_reg, fabsf(fs_value)); + break; + case ff_mov_fmt: + setFpuRegisterFloat(fd_reg, fs_value); + break; + case ff_neg_fmt: + setFpuRegisterFloat(fd_reg, -fs_value); + break; + case ff_sqrt_fmt: + setFpuRegisterFloat(fd_reg, sqrtf(fs_value)); + break; + case ff_c_un_fmt: + setFCSRBit(fcsr_cc, mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value)); + break; + case ff_c_eq_fmt: + setFCSRBit(fcsr_cc, (fs_value == ft_value)); + break; + case ff_c_ueq_fmt: + setFCSRBit(fcsr_cc, + (fs_value == ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value))); + break; + case ff_c_olt_fmt: + setFCSRBit(fcsr_cc, (fs_value < ft_value)); + break; + case ff_c_ult_fmt: + setFCSRBit(fcsr_cc, + (fs_value < ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value))); + break; + case ff_c_ole_fmt: + setFCSRBit(fcsr_cc, (fs_value <= ft_value)); + break; + case ff_c_ule_fmt: + setFCSRBit(fcsr_cc, + (fs_value <= ft_value) || (mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value))); + break; + case ff_cvt_d_fmt: + f = getFpuRegisterFloat(fs_reg); + 
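// CVT.D.S: widening the single-precision value read above to double
+ // precision is exact, so this conversion never rounds.
+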
setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ff_round_w_fmt: { // Round float to word (round half to even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round float to word towards positive infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: { // Mips64r2: Truncate float to 64-bit long-word.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ float rounded =
+ fs_value > 0 ? std::floor(fs_value + 0.5) : std::ceil(fs_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ break;
+ }
+ case ff_floor_l_fmt: // Mips64r2 instruction.
+ i64 = I64(std::floor(fs_value));
+ setFpuRegister(fd_reg, i64);
+ break;
+ case ff_ceil_l_fmt: // Mips64r2 instruction.
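+ // Rounds toward positive infinity: e.g. 1.1 -> 2 and -1.1 -> -1.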
+ i64 = I64(std::ceil(fs_value)); + setFpuRegister(fd_reg, i64); + break; + case ff_cvt_ps_s: + case ff_c_f_fmt: + MOZ_CRASH(); + break; + default: + MOZ_CRASH(); + } + break; + case rs_d: + double dt_value, ds_value; + ds_value = getFpuRegisterDouble(fs_reg); + dt_value = getFpuRegisterDouble(ft_reg); + cc = instr->fcccValue(); + fcsr_cc = GetFCSRConditionBit(cc); + switch (instr->functionFieldRaw()) { + case ff_add_fmt: + setFpuRegisterDouble(fd_reg, ds_value + dt_value); + break; + case ff_sub_fmt: + setFpuRegisterDouble(fd_reg, ds_value - dt_value); + break; + case ff_mul_fmt: + setFpuRegisterDouble(fd_reg, ds_value * dt_value); + break; + case ff_div_fmt: + setFpuRegisterDouble(fd_reg, ds_value / dt_value); + break; + case ff_abs_fmt: + setFpuRegisterDouble(fd_reg, fabs(ds_value)); + break; + case ff_mov_fmt: + setFpuRegisterDouble(fd_reg, ds_value); + break; + case ff_neg_fmt: + setFpuRegisterDouble(fd_reg, -ds_value); + break; + case ff_sqrt_fmt: + setFpuRegisterDouble(fd_reg, sqrt(ds_value)); + break; + case ff_c_un_fmt: + setFCSRBit(fcsr_cc, mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value)); + break; + case ff_c_eq_fmt: + setFCSRBit(fcsr_cc, (ds_value == dt_value)); + break; + case ff_c_ueq_fmt: + setFCSRBit(fcsr_cc, + (ds_value == dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value))); + break; + case ff_c_olt_fmt: + setFCSRBit(fcsr_cc, (ds_value < dt_value)); + break; + case ff_c_ult_fmt: + setFCSRBit(fcsr_cc, + (ds_value < dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value))); + break; + case ff_c_ole_fmt: + setFCSRBit(fcsr_cc, (ds_value <= dt_value)); + break; + case ff_c_ule_fmt: + setFCSRBit(fcsr_cc, + (ds_value <= dt_value) || (mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value))); + break; + case ff_cvt_w_fmt: // Convert double to word. + // Rounding modes are not yet supported. + MOZ_ASSERT((FCSR_ & 3) == 0); + // In rounding mode 0 it should behave like ROUND. + case ff_round_w_fmt: { // Round double to word (round half to even). + double rounded = std::floor(ds_value + 0.5); + int32_t result = I32(rounded); + if ((result & 1) != 0 && result - ds_value == 0.5) { + // If the number is halfway between two integers, + // round to the even one. + result--; + } + setFpuRegisterLo(fd_reg, result); + if (setFCSRRoundError(ds_value, rounded)) + setFpuRegisterLo(fd_reg, kFPUInvalidResult); + break; + } + case ff_trunc_w_fmt: { // Truncate double to word (round towards 0). + double rounded = trunc(ds_value); + int32_t result = I32(rounded); + setFpuRegisterLo(fd_reg, result); + if (setFCSRRoundError(ds_value, rounded)) + setFpuRegisterLo(fd_reg, kFPUInvalidResult); + break; + } + case ff_floor_w_fmt: { // Round double to word towards negative infinity. + double rounded = std::floor(ds_value); + int32_t result = I32(rounded); + setFpuRegisterLo(fd_reg, result); + if (setFCSRRoundError(ds_value, rounded)) + setFpuRegisterLo(fd_reg, kFPUInvalidResult); + break; + } + case ff_ceil_w_fmt: { // Round double to word towards positive infinity. + double rounded = std::ceil(ds_value); + int32_t result = I32(rounded); + setFpuRegisterLo(fd_reg, result); + if (setFCSRRoundError(ds_value, rounded)) + setFpuRegisterLo(fd_reg, kFPUInvalidResult); + break; + } + case ff_cvt_s_fmt: // Convert double to float (single). + setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value)); + break; + case ff_cvt_l_fmt: { // Mips64r2: Truncate double to 64-bit long-word. 
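+ // Truncation rounds toward zero: e.g. 2.9 -> 2 and -2.9 -> -2.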
+ double rounded = trunc(ds_value); + i64 = I64(rounded); + setFpuRegister(fd_reg, i64); + break; + } + case ff_trunc_l_fmt: { // Mips64r2 instruction. + double rounded = trunc(ds_value); + i64 = I64(rounded); + setFpuRegister(fd_reg, i64); + break; + } + case ff_round_l_fmt: { // Mips64r2 instruction. + double rounded = + ds_value > 0 ? std::floor(ds_value + 0.5) : std::ceil(ds_value - 0.5); + i64 = I64(rounded); + setFpuRegister(fd_reg, i64); + break; + } + case ff_floor_l_fmt: // Mips64r2 instruction. + i64 = I64(std::floor(ds_value)); + setFpuRegister(fd_reg, i64); + break; + case ff_ceil_l_fmt: // Mips64r2 instruction. + i64 = I64(std::ceil(ds_value)); + setFpuRegister(fd_reg, i64); + break; + case ff_c_f_fmt: + MOZ_CRASH(); + break; + default: + MOZ_CRASH(); + } + break; + case rs_w: + switch (instr->functionFieldRaw()) { + case ff_cvt_s_fmt: // Convert word to float (single). + i64 = getFpuRegisterLo(fs_reg); + setFpuRegisterFloat(fd_reg, static_cast<float>(i64)); + break; + case ff_cvt_d_fmt: // Convert word to double. + i64 = getFpuRegisterLo(fs_reg); + setFpuRegisterDouble(fd_reg, static_cast<double>(i64)); + break; + default: + MOZ_CRASH(); + }; + break; + case rs_l: + switch (instr->functionFieldRaw()) { + case ff_cvt_d_fmt: // Mips64r2 instruction. + i64 = getFpuRegister(fs_reg); + setFpuRegisterDouble(fd_reg, static_cast<double>(i64)); + break; + case ff_cvt_s_fmt: + MOZ_CRASH(); + break; + default: + MOZ_CRASH(); + } + break; + case rs_ps: + break; + default: + MOZ_CRASH(); + }; + break; + case op_cop1x: + switch (instr->functionFieldRaw()) { + case ff_madd_s: + float fr, ft, fs; + fr = getFpuRegisterFloat(fr_reg); + fs = getFpuRegisterFloat(fs_reg); + ft = getFpuRegisterFloat(ft_reg); + setFpuRegisterFloat(fd_reg, fs * ft + fr); + break; + case ff_madd_d: + double dr, dt, ds; + dr = getFpuRegisterDouble(fr_reg); + ds = getFpuRegisterDouble(fs_reg); + dt = getFpuRegisterDouble(ft_reg); + setFpuRegisterDouble(fd_reg, ds * dt + dr); + break; + default: + MOZ_CRASH(); + }; + break; + case op_special: + switch (instr->functionFieldRaw()) { + case ff_jr: { + SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>( + current_pc + SimInstruction::kInstrSize); + branchDelayInstructionDecode(branch_delay_instr); + set_pc(next_pc); + pc_modified_ = true; + break; + } + case ff_jalr: { + SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>( + current_pc + SimInstruction::kInstrSize); + setRegister(return_addr_reg, current_pc + 2 * SimInstruction::kInstrSize); + branchDelayInstructionDecode(branch_delay_instr); + set_pc(next_pc); + pc_modified_ = true; + break; + } + // Instructions using HI and LO registers. + case ff_mult: + setRegister(LO, I32(i128hilo & 0xffffffff)); + setRegister(HI, I32(i128hilo >> 32)); + break; + case ff_dmult: + setRegister(LO, I64(i128hilo & 0xfffffffffffffffful)); + setRegister(HI, I64(i128hilo >> 64)); + break; + case ff_multu: + setRegister(LO, I32(u128hilo & 0xffffffff)); + setRegister(HI, I32(u128hilo >> 32)); + break; + case ff_dmultu: + setRegister(LO, I64(u128hilo & 0xfffffffffffffffful)); + setRegister(HI, I64(u128hilo >> 64)); + break; + case ff_div: + case ff_divu: + // Divide by zero and overflow was not checked in the configuration + // step - div and divu do not raise exceptions. On division by 0 + // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1), + // return INT_MIN which is what the hardware does. 
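+ // The quotient lands in LO and the remainder in HI; e.g. -7 / 2
+ // leaves LO = -3 and HI = -1, since C++ division truncates toward
+ // zero.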
+ setRegister(LO, I32(i128hilo & 0xffffffff)); + setRegister(HI, I32(i128hilo >> 32)); + break; + case ff_ddiv: + case ff_ddivu: + // Divide by zero and overflow was not checked in the configuration + // step - div and divu do not raise exceptions. On division by 0 + // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1), + // return INT_MIN which is what the hardware does. + setRegister(LO, I64(i128hilo & 0xfffffffffffffffful)); + setRegister(HI, I64(i128hilo >> 64)); + break; + case ff_sync: + break; + // Break and trap instructions. + case ff_break: + case ff_tge: + case ff_tgeu: + case ff_tlt: + case ff_tltu: + case ff_teq: + case ff_tne: + if (do_interrupt) { + softwareInterrupt(instr); + } + break; + // Conditional moves. + case ff_movn: + if (rt) + setRegister(rd_reg, rs); + break; + case ff_movci: { + uint32_t cc = instr->fbccValue(); + uint32_t fcsr_cc = GetFCSRConditionBit(cc); + if (instr->bit(16)) { // Read Tf bit. + if (testFCSRBit(fcsr_cc)) + setRegister(rd_reg, rs); + } else { + if (!testFCSRBit(fcsr_cc)) + setRegister(rd_reg, rs); + } + break; + } + case ff_movz: + if (!rt) + setRegister(rd_reg, rs); + break; + default: // For other special opcodes we do the default operation. + setRegister(rd_reg, alu_out); + }; + break; + case op_special2: + switch (instr->functionFieldRaw()) { + case ff_mul: + setRegister(rd_reg, alu_out); + // HI and LO are UNPREDICTABLE after the operation. + setRegister(LO, Unpredictable); + setRegister(HI, Unpredictable); + break; + default: // For other special2 opcodes we do the default operation. + setRegister(rd_reg, alu_out); + } + break; + case op_special3: + switch (instr->functionFieldRaw()) { + case ff_ins: + case ff_dins: + case ff_dinsm: + case ff_dinsu: + // Ins instr leaves result in Rt, rather than Rd. + setRegister(rt_reg, alu_out); + break; + case ff_ext: + case ff_dext: + case ff_dextm: + case ff_dextu: + // Ext instr leaves result in Rt, rather than Rd. + setRegister(rt_reg, alu_out); + break; + case ff_bshfl: + setRegister(rd_reg, alu_out); + break; + default: + MOZ_CRASH(); + }; + break; + // Unimplemented opcodes raised an error in the configuration step before, + // so we can use the default here to set the destination register in common + // cases. + default: + setRegister(rd_reg, alu_out); + }; +} + +// Type 2: instructions using a 16 bits immediate. (e.g. addi, beq). +void +Simulator::decodeTypeImmediate(SimInstruction* instr) +{ + // Instruction fields. + Opcode op = instr->opcodeFieldRaw(); + int64_t rs = getRegister(instr->rsValue()); + int32_t rt_reg = instr->rtValue(); // Destination register. + int64_t rt = getRegister(rt_reg); + int16_t imm16 = instr->imm16Value(); + + int32_t ft_reg = instr->ftValue(); // Destination register. + + // Zero extended immediate. + uint32_t oe_imm16 = 0xffff & imm16; + // Sign extended immediate. + int32_t se_imm16 = imm16; + + // Get current pc. + int64_t current_pc = get_pc(); + // Next pc. + int64_t next_pc = bad_ra; + + // Used for conditional branch instructions. + bool do_branch = false; + bool execute_branch_delay_instruction = false; + + // Used for arithmetic instructions. + int64_t alu_out = 0; + // Floating point. + double fp_out = 0.0; + uint32_t cc, cc_value, fcsr_cc; + + // Used for memory instructions. + uint64_t addr = 0x0; + // Value to be written in memory. + uint64_t mem_value = 0x0; + __int128 temp; + + // ---------- Configuration (and execution for op_regimm). + switch (op) { + // ------------- op_cop1. Coprocessor instructions. 
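+ // Only the branch-on-FP-condition forms (bc1f/bc1t) reach this
+ // point; they branch on the FCSR condition bit selected by the
+ // instruction's cc field.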
+ case op_cop1: + switch (instr->rsFieldRaw()) { + case rs_bc1: // Branch on coprocessor condition. + cc = instr->fbccValue(); + fcsr_cc = GetFCSRConditionBit(cc); + cc_value = testFCSRBit(fcsr_cc); + do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value; + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) + next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize; + else + next_pc = current_pc + kBranchReturnOffset; + break; + default: + MOZ_CRASH(); + }; + break; + // ------------- op_regimm class. + case op_regimm: + switch (instr->rtFieldRaw()) { + case rt_bltz: + do_branch = (rs < 0); + break; + case rt_bltzal: + do_branch = rs < 0; + break; + case rt_bgez: + do_branch = rs >= 0; + break; + case rt_bgezal: + do_branch = rs >= 0; + break; + default: + MOZ_CRASH(); + }; + switch (instr->rtFieldRaw()) { + case rt_bltz: + case rt_bltzal: + case rt_bgez: + case rt_bgezal: + // Branch instructions common part. + execute_branch_delay_instruction = true; + // Set next_pc. + if (do_branch) { + next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize; + if (instr->isLinkingInstruction()) + setRegister(31, current_pc + kBranchReturnOffset); + } else { + next_pc = current_pc + kBranchReturnOffset; + } + default: + break; + }; + break; // case op_regimm. + // ------------- Branch instructions. + // When comparing to zero, the encoding of rt field is always 0, so we don't + // need to replace rt with zero. + case op_beq: + do_branch = (rs == rt); + break; + case op_bne: + do_branch = rs != rt; + break; + case op_blez: + do_branch = rs <= 0; + break; + case op_bgtz: + do_branch = rs > 0; + break; + // ------------- Arithmetic instructions. + case op_addi: + alu_out = I32(rs) + se_imm16; + if ((alu_out << 32) != (alu_out << 31)) + exceptions[kIntegerOverflow] = 1; + alu_out = I32(alu_out); + break; + case op_daddi: + temp = alu_out = rs + se_imm16; + if ((temp << 64) != (temp << 63)) + exceptions[kIntegerOverflow] = 1; + alu_out = I64(temp); + break; + case op_addiu: + alu_out = I32(I32(rs) + se_imm16); + break; + case op_daddiu: + alu_out = rs + se_imm16; + break; + case op_slti: + alu_out = (rs < se_imm16) ? 1 : 0; + break; + case op_sltiu: + alu_out = (U64(rs) < U64(se_imm16)) ? 1 : 0; + break; + case op_andi: + alu_out = rs & oe_imm16; + break; + case op_ori: + alu_out = rs | oe_imm16; + break; + case op_xori: + alu_out = rs ^ oe_imm16; + break; + case op_lui: + alu_out = (se_imm16 << 16); + break; + // ------------- Memory instructions. + case op_lbu: + addr = rs + se_imm16; + alu_out = readBU(addr, instr); + break; + case op_lb: + addr = rs + se_imm16; + alu_out = readB(addr, instr); + break; + case op_lhu: + addr = rs + se_imm16; + alu_out = readHU(addr, instr); + break; + case op_lh: + addr = rs + se_imm16; + alu_out = readH(addr, instr); + break; + case op_lwu: + addr = rs + se_imm16; + alu_out = readWU(addr, instr); + break; + case op_lw: + addr = rs + se_imm16; + alu_out = readW(addr, instr); + break; + case op_lwl: { + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & 3; + uint8_t byte_shift = 3 - al_offset; + uint32_t mask = (1 << byte_shift * 8) - 1; + addr = rs + se_imm16 - al_offset; + alu_out = readW(addr, instr); + alu_out <<= byte_shift * 8; + alu_out |= rt & mask; + break; + } + case op_lwr: { + // al_offset is offset of the effective address within an aligned word. + uint8_t al_offset = (rs + se_imm16) & 3; + uint8_t byte_shift = 3 - al_offset; + uint32_t mask = al_offset ? 
(~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = U32(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ld:
+ addr = rs + se_imm16;
+ alu_out = readDW(addr, instr);
+ break;
+ case op_ldl: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = (1ul << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ldr: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = al_offset ? (~0ul << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out = U64(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U32(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_sc:
+ addr = rs + se_imm16;
+ break;
+ case op_sd:
+ addr = rs + se_imm16;
+ break;
+ case op_sdl: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = byte_shift ? (~0ul << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readDW(addr, instr) & mask;
+ mem_value |= U64(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sdr: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint64_t mask = (1ul << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readDW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
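+ // Loads write back the value computed in the configuration step
+ // above; stores perform the actual memory write here.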
+ case op_lbu: + case op_lb: + case op_lhu: + case op_lh: + case op_lwu: + case op_lw: + case op_lwl: + case op_lwr: + case op_ll: + case op_ld: + case op_ldl: + case op_ldr: + setRegister(rt_reg, alu_out); + break; + case op_sb: + writeB(addr, I8(rt), instr); + break; + case op_sh: + writeH(addr, U16(rt), instr); + break; + case op_sw: + writeW(addr, I32(rt), instr); + break; + case op_swl: + writeW(addr, I32(mem_value), instr); + break; + case op_swr: + writeW(addr, I32(mem_value), instr); + break; + case op_sc: + writeW(addr, I32(rt), instr); + setRegister(rt_reg, 1); + break; + case op_sd: + writeDW(addr, rt, instr); + break; + case op_sdl: + writeDW(addr, mem_value, instr); + break; + case op_sdr: + writeDW(addr, mem_value, instr); + break; + case op_lwc1: + setFpuRegisterLo(ft_reg, alu_out); + break; + case op_ldc1: + setFpuRegisterDouble(ft_reg, fp_out); + break; + case op_swc1: + writeW(addr, getFpuRegisterLo(ft_reg), instr); + break; + case op_sdc1: + writeD(addr, getFpuRegisterDouble(ft_reg), instr); + break; + default: + break; + }; + + + if (execute_branch_delay_instruction) { + // Execute branch delay slot + // We don't check for end_sim_pc. First it should not be met as the current + // pc is valid. Secondly a jump should always execute its branch delay slot. + SimInstruction* branch_delay_instr = + reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize); + branchDelayInstructionDecode(branch_delay_instr); + } + + // If needed update pc after the branch delay execution. + if (next_pc != bad_ra) + set_pc(next_pc); +} + +// Type 3: instructions using a 26 bits immediate. (e.g. j, jal). +void +Simulator::decodeTypeJump(SimInstruction* instr) +{ + // Get current pc. + int64_t current_pc = get_pc(); + // Get unchanged bits of pc. + int64_t pc_high_bits = current_pc & 0xfffffffff0000000ul; + // Next pc. + int64_t next_pc = pc_high_bits | (instr->imm26Value() << 2); + + // Execute branch delay slot. + // We don't check for end_sim_pc. First it should not be met as the current pc + // is valid. Secondly a jump should always execute its branch delay slot. + SimInstruction* branch_delay_instr = + reinterpret_cast<SimInstruction*>(current_pc + SimInstruction::kInstrSize); + branchDelayInstructionDecode(branch_delay_instr); + + // Update pc and ra if necessary. + // Do this after the branch delay execution. + if (instr->isLinkingInstruction()) + setRegister(31, current_pc + 2 * SimInstruction::kInstrSize); + set_pc(next_pc); + pc_modified_ = true; +} + +// Executes the current instruction. +void +Simulator::instructionDecode(SimInstruction* instr) +{ + if (Simulator::ICacheCheckingEnabled) { + AutoLockSimulatorCache als(this); + CheckICacheLocked(icache(), instr); + } + pc_modified_ = false; + + switch (instr->instructionType()) { + case SimInstruction::kRegisterType: + decodeTypeRegister(instr); + break; + case SimInstruction::kImmediateType: + decodeTypeImmediate(instr); + break; + case SimInstruction::kJumpType: + decodeTypeJump(instr); + break; + default: + UNSUPPORTED(); + } + if (!pc_modified_) + setRegister(pc, reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize); +} + +void +Simulator::branchDelayInstructionDecode(SimInstruction* instr) +{ + if (single_stepping_) + single_step_callback_(single_step_callback_arg_, this, (void*)instr); + + if (instr->instructionBits() == NopInst) { + // Short-cut generic nop instructions. They are always valid and they + // never change the simulator state. 
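+ // (On MIPS the canonical nop is sll zero, zero, 0, which encodes
+ // as an all-zero instruction word.)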
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+ MOZ_CRASH("Error: Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+void
+Simulator::enable_single_stepping(SingleStepCallback cb, void* arg)
+{
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void
+Simulator::disable_single_stepping()
+{
+ if (!single_stepping_)
+ return;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template<bool enableStopSimAt>
+void
+Simulator::execute()
+{
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ WasmActivation* activation = TlsPerThreadData.get()->runtimeFromMainThread()->wasmActivationStack();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, (void*)program_counter);
+ SimInstruction* instr = reinterpret_cast<SimInstruction *>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+
+ int64_t rpc = resume_pc_;
+ if (MOZ_UNLIKELY(rpc != 0)) {
+ // wasm signal handler ran and we have to adjust the pc.
+ activation->setResumePC((void*)get_pc());
+ set_pc(rpc);
+ resume_pc_ = 0;
+ }
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_)
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+}
+
+void
+Simulator::callInternal(uint8_t* entry)
+{
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // ra the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers so they can be checked
+ // and restored after the call.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t fp_val = getRegister(fp);
+
+ // Set up the callee-saved registers with a known value so we can check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
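+ // The template parameter lets the common case compile the
+ // StopSimAt comparison out of the dispatch loop entirely.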
+ if (Simulator::StopSimAt != -1)
+ execute<true>();
+ else
+ execute<false>();
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with their original values.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t
+Simulator::call(uint8_t* entry, int argument_count, ...)
+{
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount)
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ else
+ entry_stack = entry_stack - kCArgsSlotsSize;
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Set up the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg))
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ else
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(v0);
+ return result;
+}
+
+uintptr_t
+Simulator::pushAddress(uintptr_t address)
+{
+ int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t
+Simulator::popAddress()
+{
+ int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator*
+JSRuntime::simulator() const
+{
+ return simulator_;
+}
+
+js::jit::Simulator*
+js::PerThreadData::simulator() const
+{
+ return runtime_->simulator();
+}
+
+uintptr_t*
+JSRuntime::addressOfSimulatorStackLimit()
+{
+ return simulator_->addressOfStackLimit();
+}
diff --git a/js/src/jit/mips64/Simulator-mips64.h b/js/src/jit/mips64/Simulator-mips64.h
new file mode 100644
index 000000000..de1930c91
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -0,0 +1,440 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef jit_mips64_Simulator_mips64_h +#define jit_mips64_Simulator_mips64_h + +#ifdef JS_SIMULATOR_MIPS64 + +#include "jit/IonTypes.h" +#include "threading/Thread.h" +#include "vm/MutexIDs.h" + +namespace js { +namespace jit { + +class Simulator; +class Redirection; +class CachePage; +class AutoLockSimulator; + +// When the SingleStepCallback is called, the simulator is about to execute +// sim->get_pc() and the current machine state represents the completed +// execution of the previous pc. +typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc); + +const intptr_t kPointerAlignment = 8; +const intptr_t kPointerAlignmentMask = kPointerAlignment - 1; + +const intptr_t kDoubleAlignment = 8; +const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1; + + +// Number of general purpose registers. +const int kNumRegisters = 32; + +// In the simulator, the PC register is simulated as the 34th register. +const int kPCRegister = 34; + +// Number coprocessor registers. +const int kNumFPURegisters = 32; + +// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. +const int kFCSRRegister = 31; +const int kInvalidFPUControlRegister = -1; +const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1; + +// FCSR constants. 
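+// The IEEE exception flags occupy FCSR bits 2 through 6, so
+// kFCSRFlagMask below works out to 0x7c and kFCSRExceptionFlagMask
+// (every flag except inexact) to 0x78.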
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask |
+ kFCSRUnderflowFlagMask |
+ kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask |
+ kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On the MIPS64 Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+class Simulator {
+ friend class Redirection;
+ friend class MipsDebugger;
+ friend class AutoLockSimulatorCache;
+ public:
+
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0, v1,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9,
+ k0, k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+ f12, f13, f14, f15, f16, f17, f18, f19, f20, f21,
+ f22, f23, f24, f25, f26, f27, f28, f29, f30, f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create(JSContext* cx);
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the MIPS
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value); + void setFpuRegisterLo(int fpureg, int32_t value); + void setFpuRegisterHi(int fpureg, int32_t value); + void setFpuRegisterFloat(int fpureg, float value); + void setFpuRegisterDouble(int fpureg, double value); + int64_t getFpuRegister(int fpureg) const; + int32_t getFpuRegisterLo(int fpureg) const; + int32_t getFpuRegisterHi(int fpureg) const; + float getFpuRegisterFloat(int fpureg) const; + double getFpuRegisterDouble(int fpureg) const; + void setFCSRBit(uint32_t cc, bool value); + bool testFCSRBit(uint32_t cc); + bool setFCSRRoundError(double original, double rounded); + + // Special case of set_register and get_register to access the raw PC value. + void set_pc(int64_t value); + int64_t get_pc() const; + + template <typename T> + T get_pc_as() const { return reinterpret_cast<T>(get_pc()); } + + void set_resume_pc(void* value) { + resume_pc_ = int64_t(value); + } + + void enable_single_stepping(SingleStepCallback cb, void* arg); + void disable_single_stepping(); + + // Accessor to the internal simulator stack area. + uintptr_t stackLimit() const; + bool overRecursed(uintptr_t newsp = 0) const; + bool overRecursedWithExtra(uint32_t extra) const; + + // Executes MIPS instructions until the PC reaches end_sim_pc. + template<bool enableStopSimAt> + void execute(); + + // Sets up the simulator state and grabs the result on return. + int64_t call(uint8_t* entry, int argument_count, ...); + + // Push an address onto the JS stack. + uintptr_t pushAddress(uintptr_t address); + + // Pop an address from the JS stack. + uintptr_t popAddress(); + + // Debugger input. + void setLastDebuggerInput(char* input); + char* lastDebuggerInput() { return lastDebuggerInput_; } + // ICache checking. + static void FlushICache(void* start, size_t size); + + // Returns true if pc register contains one of the 'SpecialValues' defined + // below (bad_ra, end_sim_pc). + bool has_bad_pc() const; + + private: + enum SpecialValues { + // Known bad pc value to ensure that the simulator does not execute + // without being properly setup. + bad_ra = -1, + // A pc value used to signal the simulator to stop execution. Generally + // the ra is set to this value on transition from native C code to + // simulated execution, so that the simulator can "return" to the native + // C code. + end_sim_pc = -2, + // Unpredictable value. + Unpredictable = 0xbadbeaf + }; + + bool init(); + + // Unsupported instructions use Format to print an error and stop execution. + void format(SimInstruction* instr, const char* format); + + // Read and write memory. 
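+ // The B/H/W/DW suffixes select 8-, 16-, 32- and 64-bit accesses;
+ // a trailing U marks the zero-extending (unsigned) load variants.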
+ inline uint8_t readBU(uint64_t addr, SimInstruction* instr); + inline int8_t readB(uint64_t addr, SimInstruction* instr); + inline void writeB(uint64_t addr, uint8_t value, SimInstruction* instr); + inline void writeB(uint64_t addr, int8_t value, SimInstruction* instr); + + inline uint16_t readHU(uint64_t addr, SimInstruction* instr); + inline int16_t readH(uint64_t addr, SimInstruction* instr); + inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr); + inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr); + + inline uint32_t readWU(uint64_t addr, SimInstruction* instr); + inline int32_t readW(uint64_t addr, SimInstruction* instr); + inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr); + inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr); + + inline int64_t readDW(uint64_t addr, SimInstruction* instr); + inline int64_t readDWL(uint64_t addr, SimInstruction* instr); + inline int64_t readDWR(uint64_t addr, SimInstruction* instr); + inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr); + + inline double readD(uint64_t addr, SimInstruction* instr); + inline void writeD(uint64_t addr, double value, SimInstruction* instr); + + // Helper function for decodeTypeRegister. + void configureTypeRegister(SimInstruction* instr, + int64_t& alu_out, + __int128& i128hilo, + unsigned __int128& u128hilo, + int64_t& next_pc, + int32_t& return_addr_reg, + bool& do_interrupt); + + // Executing is handled based on the instruction type. + void decodeTypeRegister(SimInstruction* instr); + void decodeTypeImmediate(SimInstruction* instr); + void decodeTypeJump(SimInstruction* instr); + + // Used for breakpoints and traps. + void softwareInterrupt(SimInstruction* instr); + + // Stop helper functions. + bool isWatchpoint(uint32_t code); + void printWatchpoint(uint32_t code); + void handleStop(uint32_t code, SimInstruction* instr); + bool isStopInstruction(SimInstruction* instr); + bool isEnabledStop(uint32_t code); + void enableStop(uint32_t code); + void disableStop(uint32_t code); + void increaseStopCounter(uint32_t code); + void printStopInfo(uint32_t code); + + + // Executes one instruction. + void instructionDecode(SimInstruction* instr); + // Execute one instruction placed in a branch delay slot. + void branchDelayInstructionDecode(SimInstruction* instr); + + public: + static bool ICacheCheckingEnabled; + + static int64_t StopSimAt; + + // Runtime call support. + static void* RedirectNativeFunction(void* nativeFunction, ABIFunctionType type); + + private: + enum Exception { + kNone, + kIntegerOverflow, + kIntegerUnderflow, + kDivideByZero, + kNumExceptions + }; + int16_t exceptions[kNumExceptions]; + + // Exceptions. + void signalExceptions(); + + // Handle return value for runtime FP functions. + void setCallResultDouble(double result); + void setCallResultFloat(float result); + void setCallResult(int64_t res); + void setCallResult(__int128 res); + + void callInternal(uint8_t* entry); + + // Architecture state. + // Registers. + int64_t registers_[kNumSimuRegisters]; + // Coprocessor Registers. + int64_t FPUregisters_[kNumFPURegisters]; + // FPU control register. + uint32_t FCSR_; + + // Simulator support. + char* stack_; + uintptr_t stackLimit_; + bool pc_modified_; + int64_t icount_; + int64_t break_count_; + + int64_t resume_pc_; + + // Debugger input. + char* lastDebuggerInput_; + + // Registered breakpoints. 
+ SimInstruction* break_pc_; + Instr break_instr_; + + // Single-stepping support + bool single_stepping_; + SingleStepCallback single_step_callback_; + void* single_step_callback_arg_; + + // A stop is watched if its code is less than kNumOfWatchedStops. + // Only watched stops support enabling/disabling and the counter feature. + static const uint32_t kNumOfWatchedStops = 256; + + + // Stop is disabled if bit 31 is set. + static const uint32_t kStopDisabledBit = 1U << 31; + + // A stop is enabled, meaning the simulator will stop when meeting the + // instruction, if bit 31 of watchedStops_[code].count is unset. + // The value watchedStops_[code].count & ~(1 << 31) indicates how many times + // the breakpoint was hit or gone through. + struct StopCountAndDesc { + uint32_t count_; + char* desc_; + }; + StopCountAndDesc watchedStops_[kNumOfWatchedStops]; + + private: + // ICache checking. + struct ICacheHasher { + typedef void* Key; + typedef void* Lookup; + static HashNumber hash(const Lookup& l); + static bool match(const Key& k, const Lookup& l); + }; + + public: + typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap; + + private: + // This lock creates a critical section around 'redirection_' and + // 'icache_', which are referenced both by the execution engine + // and by the off-thread compiler (see Redirection::Get in the cpp file). + Mutex cacheLock_; +#ifdef DEBUG + mozilla::Maybe<Thread::Id> cacheLockHolder_; +#endif + + Redirection* redirection_; + ICacheMap icache_; + + public: + ICacheMap& icache() { + // Technically we need the lock to access the innards of the + // icache, not to take its address, but the latter condition + // serves as a useful complement to the former. + MOZ_ASSERT(cacheLockHolder_.isSome()); + return icache_; + } + + Redirection* redirection() const { + MOZ_ASSERT(cacheLockHolder_.isSome()); + return redirection_; + } + + void setRedirection(js::jit::Redirection* redirection) { + MOZ_ASSERT(cacheLockHolder_.isSome()); + redirection_ = redirection; + } +}; + +#define JS_CHECK_SIMULATOR_RECURSION_WITH_EXTRA(cx, extra, onerror) \ + JS_BEGIN_MACRO \ + if (cx->mainThread().simulator()->overRecursedWithExtra(extra)) { \ + js::ReportOverRecursed(cx); \ + onerror; \ + } \ + JS_END_MACRO + +} // namespace jit +} // namespace js + +#endif /* JS_SIMULATOR_MIPS64 */ + +#endif /* jit_mips64_Simulator_mips64_h */ diff --git a/js/src/jit/mips64/Trampoline-mips64.cpp b/js/src/jit/mips64/Trampoline-mips64.cpp new file mode 100644 index 000000000..70a375019 --- /dev/null +++ b/js/src/jit/mips64/Trampoline-mips64.cpp @@ -0,0 +1,1363 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=8 sts=4 et sw=4 tw=99: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/DebugOnly.h" + +#include "jscompartment.h" + +#include "jit/Bailouts.h" +#include "jit/JitCompartment.h" +#include "jit/JitFrames.h" +#include "jit/JitSpewer.h" +#include "jit/Linker.h" +#include "jit/mips-shared/SharedICHelpers-mips-shared.h" +#include "jit/mips64/Bailouts-mips64.h" +#ifdef JS_ION_PERF +# include "jit/PerfSpewer.h" +#endif +#include "jit/VMFunctions.h" + +#include "jit/MacroAssembler-inl.h" + +using namespace js; +using namespace js::jit; + +// All registers to save and restore. 
This includes the stack pointer, since we +// use the ability to reference register values on the stack by index. +static const LiveRegisterSet AllRegs = + LiveRegisterSet(GeneralRegisterSet(Registers::AllMask), + FloatRegisterSet(FloatRegisters::AllMask)); + +static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean."); + +struct EnterJITRegs +{ + double f31; + double f30; + double f29; + double f28; + double f27; + double f26; + double f25; + double f24; + + // non-volatile registers. + uint64_t ra; + uint64_t s7; + uint64_t s6; + uint64_t s5; + uint64_t s4; + uint64_t s3; + uint64_t s2; + uint64_t s1; + uint64_t s0; + // Save reg_vp(a7) on stack, use it after call jit code. + uint64_t a7; +}; + +static void +GenerateReturn(MacroAssembler& masm, int returnCode) +{ + MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs)); + + if (isLoongson()) { + // Restore non-volatile registers + masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0)); + masm.as_gslq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2)); + masm.as_gslq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4)); + masm.as_gslq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6)); + masm.as_gslq(s7, ra, StackPointer, offsetof(EnterJITRegs, ra)); + + // Restore non-volatile floating point registers + masm.as_gslq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25)); + masm.as_gslq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27)); + masm.as_gslq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29)); + masm.as_gslq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31)); + } else { + // Restore non-volatile registers + masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0)); + masm.as_ld(s1, StackPointer, offsetof(EnterJITRegs, s1)); + masm.as_ld(s2, StackPointer, offsetof(EnterJITRegs, s2)); + masm.as_ld(s3, StackPointer, offsetof(EnterJITRegs, s3)); + masm.as_ld(s4, StackPointer, offsetof(EnterJITRegs, s4)); + masm.as_ld(s5, StackPointer, offsetof(EnterJITRegs, s5)); + masm.as_ld(s6, StackPointer, offsetof(EnterJITRegs, s6)); + masm.as_ld(s7, StackPointer, offsetof(EnterJITRegs, s7)); + masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra)); + + // Restore non-volatile floating point registers + masm.as_ld(f24, StackPointer, offsetof(EnterJITRegs, f24)); + masm.as_ld(f25, StackPointer, offsetof(EnterJITRegs, f25)); + masm.as_ld(f26, StackPointer, offsetof(EnterJITRegs, f26)); + masm.as_ld(f27, StackPointer, offsetof(EnterJITRegs, f27)); + masm.as_ld(f28, StackPointer, offsetof(EnterJITRegs, f28)); + masm.as_ld(f29, StackPointer, offsetof(EnterJITRegs, f29)); + masm.as_ld(f30, StackPointer, offsetof(EnterJITRegs, f30)); + masm.as_ld(f31, StackPointer, offsetof(EnterJITRegs, f31)); + } + + masm.freeStack(sizeof(EnterJITRegs)); + + masm.branch(ra); +} + +static void +GeneratePrologue(MacroAssembler& masm) +{ + masm.reserveStack(sizeof(EnterJITRegs)); + + if (isLoongson()) { + masm.as_gssq(a7, s0, StackPointer, offsetof(EnterJITRegs, s0)); + masm.as_gssq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2)); + masm.as_gssq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4)); + masm.as_gssq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6)); + masm.as_gssq(s7, ra, StackPointer, offsetof(EnterJITRegs, ra)); + + masm.as_gssq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25)); + masm.as_gssq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27)); + masm.as_gssq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29)); + masm.as_gssq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31)); + return; + } + + masm.as_sd(s0, StackPointer, 
offsetof(EnterJITRegs, s0)); + masm.as_sd(s1, StackPointer, offsetof(EnterJITRegs, s1)); + masm.as_sd(s2, StackPointer, offsetof(EnterJITRegs, s2)); + masm.as_sd(s3, StackPointer, offsetof(EnterJITRegs, s3)); + masm.as_sd(s4, StackPointer, offsetof(EnterJITRegs, s4)); + masm.as_sd(s5, StackPointer, offsetof(EnterJITRegs, s5)); + masm.as_sd(s6, StackPointer, offsetof(EnterJITRegs, s6)); + masm.as_sd(s7, StackPointer, offsetof(EnterJITRegs, s7)); + masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra)); + masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7)); + + masm.as_sd(f24, StackPointer, offsetof(EnterJITRegs, f24)); + masm.as_sd(f25, StackPointer, offsetof(EnterJITRegs, f25)); + masm.as_sd(f26, StackPointer, offsetof(EnterJITRegs, f26)); + masm.as_sd(f27, StackPointer, offsetof(EnterJITRegs, f27)); + masm.as_sd(f28, StackPointer, offsetof(EnterJITRegs, f28)); + masm.as_sd(f29, StackPointer, offsetof(EnterJITRegs, f29)); + masm.as_sd(f30, StackPointer, offsetof(EnterJITRegs, f30)); + masm.as_sd(f31, StackPointer, offsetof(EnterJITRegs, f31)); +} + + +// Generates a trampoline for calling Jit compiled code from a C++ function. +// The trampoline uses the EnterJitCode signature, with the standard x64 fastcall +// calling convention. +JitCode * +JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type) +{ + const Register reg_code = IntArgReg0; + const Register reg_argc = IntArgReg1; + const Register reg_argv = IntArgReg2; + const mozilla::DebugOnly<Register> reg_frame = IntArgReg3; + const Register reg_token = IntArgReg4; + const Register reg_chain = IntArgReg5; + const Register reg_values = IntArgReg6; + const Register reg_vp = IntArgReg7; + MacroAssembler masm(cx); + + MOZ_ASSERT(OsrFrameReg == reg_frame); + + GeneratePrologue(masm); + + // Save stack pointer into s4 + masm.movePtr(StackPointer, s4); + + // Save stack pointer as baseline frame. + if (type == EnterJitBaseline) + masm.movePtr(StackPointer, BaselineFrameReg); + + // Load the number of actual arguments into s3. + masm.unboxInt32(Address(reg_vp, 0), s3); + + /*************************************************************** + Loop over argv vector, push arguments onto stack in reverse order + ***************************************************************/ + + // If we are constructing, argc also needs to include newTarget. + { + Label noNewTarget; + masm.branchTest32(Assembler::Zero, reg_token, Imm32(CalleeToken_FunctionConstructing), + &noNewTarget); + + masm.add32(Imm32(1), reg_argc); + + masm.bind(&noNewTarget); + } + + // Make the stack aligned. + masm.ma_and(s0, reg_argc, Imm32(1)); + masm.ma_dsubu(s1, StackPointer, Imm32(sizeof(Value))); + masm.as_movn(StackPointer, s1, s0); + + masm.as_dsll(s0, reg_argc, 3); // Value* argv + masm.addPtr(reg_argv, s0); // s0 = &argv[argc] + + // Loop over arguments, copying them from an unknown buffer onto the Ion + // stack so they can be accessed from JIT'ed code.
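+ // Roughly, in C terms (illustrative sketch only, not emitted code):
+ //   while (s0 > argv) { s0 -= sizeof(Value); *--sp = *(Value*)s0; }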
+ Label header, footer; + // If there aren't any arguments, don't do anything + masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump); + { + masm.bind(&header); + + masm.subPtr(Imm32(sizeof(Value)), s0); + masm.subPtr(Imm32(sizeof(Value)), StackPointer); + + ValueOperand value = ValueOperand(s6); + masm.loadValue(Address(s0, 0), value); + masm.storeValue(value, Address(StackPointer, 0)); + + masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump); + } + masm.bind(&footer); + + masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer); + masm.storePtr(s3, Address(StackPointer, sizeof(uintptr_t))); // actual arguments + masm.storePtr(reg_token, Address(StackPointer, 0)); // callee token + + masm.subPtr(StackPointer, s4); + masm.makeFrameDescriptor(s4, JitFrame_Entry, JitFrameLayout::Size()); + masm.push(s4); // descriptor + + CodeLabel returnLabel; + CodeLabel oomReturnLabel; + if (type == EnterJitBaseline) { + // Handle OSR. + AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); + regs.take(OsrFrameReg); + regs.take(BaselineFrameReg); + regs.take(reg_code); + regs.take(ReturnReg); + regs.take(JSReturnOperand); + + Label notOsr; + masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump); + + Register numStackValues = reg_values; + regs.take(numStackValues); + Register scratch = regs.takeAny(); + + // Push return address. + masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer); + masm.ma_li(scratch, returnLabel.patchAt()); + masm.storePtr(scratch, Address(StackPointer, 0)); + + // Push previous frame pointer. + masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer); + masm.storePtr(BaselineFrameReg, Address(StackPointer, 0)); + + // Reserve frame. + Register framePtr = BaselineFrameReg; + masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer); + masm.movePtr(StackPointer, framePtr); + + // Reserve space for locals and stack values. + masm.ma_dsll(scratch, numStackValues, Imm32(3)); + masm.subPtr(scratch, StackPointer); + + // Enter exit frame. + masm.addPtr(Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset), scratch); + masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size()); + + // Push frame descriptor and fake return address. + masm.reserveStack(2 * sizeof(uintptr_t)); + masm.storePtr(scratch, Address(StackPointer, sizeof(uintptr_t))); // Frame descriptor + masm.storePtr(zero, Address(StackPointer, 0)); // fake return address + + // No GC things to mark, push a bare token. + masm.enterFakeExitFrame(ExitFrameLayoutBareToken); + + masm.reserveStack(2 * sizeof(uintptr_t)); + masm.storePtr(framePtr, Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame + masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode + + masm.setupUnalignedABICall(scratch); + masm.passABIArg(BaselineFrameReg); // BaselineFrame + masm.passABIArg(OsrFrameReg); // InterpreterFrame + masm.passABIArg(numStackValues); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, jit::InitBaselineFrameForOsr)); + + regs.add(OsrFrameReg); + Register jitcode = regs.takeAny(); + masm.loadPtr(Address(StackPointer, 0), jitcode); + masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr); + masm.freeStack(2 * sizeof(uintptr_t)); + + Label error; + masm.freeStack(ExitFrameLayout::SizeWithFooter()); + masm.addPtr(Imm32(BaselineFrame::Size()), framePtr); + masm.branchIfFalseBool(ReturnReg, &error); + + // If OSR-ing, then emit instrumentation for setting lastProfilerFrame + // if profiler instrumentation is enabled.
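+ // (In effect: when the SPS profiler is enabled, lastProfilingFrame is
+ // pointed at the new baseline frame before jumping into jit code;
+ // otherwise the bookkeeping below is skipped.)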
+ { + Label skipProfilingInstrumentation; + Register realFramePtr = numStackValues; + AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled()); + masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), + &skipProfilingInstrumentation); + masm.ma_daddu(realFramePtr, framePtr, Imm32(sizeof(void*))); + masm.profilerEnterFrame(realFramePtr, scratch); + masm.bind(&skipProfilingInstrumentation); + } + + masm.jump(jitcode); + + // OOM: load error value, discard return address and previous frame + // pointer and return. + masm.bind(&error); + masm.movePtr(framePtr, StackPointer); + masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer); + masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand); + masm.ma_li(scratch, oomReturnLabel.patchAt()); + masm.jump(scratch); + + masm.bind(&notOsr); + // Load the scope chain in R1. + MOZ_ASSERT(R1.scratchReg() != reg_code); + masm.ma_move(R1.scratchReg(), reg_chain); + } + + // The call will push the return address on the stack, thus we check that + // the stack would be aligned once the call is complete. + masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t)); + + // Call the function with pushing return address to stack. + masm.callJitNoProfiler(reg_code); + + if (type == EnterJitBaseline) { + // Baseline OSR will return here. + masm.bind(returnLabel.target()); + masm.addCodeLabel(returnLabel); + masm.bind(oomReturnLabel.target()); + masm.addCodeLabel(oomReturnLabel); + } + + // Pop arguments off the stack. + // s0 <- 8*argc (size of all arguments we pushed on the stack) + masm.pop(s0); + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0); + masm.addPtr(s0, StackPointer); + + // Store the returned value into the vp + masm.as_ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7)); + masm.storeValue(JSReturnOperand, Address(reg_vp, 0)); + + // Restore non-volatile registers and return. + GenerateReturn(masm, ShortJump); + + Linker linker(masm); + AutoFlushICache afc("GenerateEnterJIT"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "EnterJIT"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateInvalidator(JSContext* cx) +{ + MacroAssembler masm(cx); + + // Stack has to be aligned here. If not, we will have to fix it. + masm.checkStackAlignment(); + + // Push registers such that we can access them from [base + code]. + masm.PushRegsInMask(AllRegs); + + // Pass pointer to InvalidationBailoutStack structure. + masm.movePtr(StackPointer, a0); + + // Reserve space for the return value and the BailoutInfo pointer. + masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer); + // Pass pointer to return value. + masm.ma_daddu(a1, StackPointer, Imm32(sizeof(uintptr_t))); + // Pass pointer to BailoutInfo + masm.movePtr(StackPointer, a2); + + masm.setupAlignedABICall(); + masm.passABIArg(a0); + masm.passABIArg(a1); + masm.passABIArg(a2); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout)); + + masm.loadPtr(Address(StackPointer, 0), a2); + masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1); + // Remove the return address, the IonScript, the register state + // (InvalidationBailoutStack) and the space that was allocated for the + // return value. + masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)), StackPointer); + // Remove the space that this frame was using before the bailout + // (computed by InvalidationBailout) + masm.addPtr(a1, StackPointer); + + // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
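+ // (The shared tail is built by generateBailoutTailStub() below, which
+ // calls masm.generateBailoutTail(a1, a2) and hence expects the
+ // BailoutInfo pointer in a2.)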
+ JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail(); + masm.branch(bailoutTail); + + Linker linker(masm); + AutoFlushICache afc("Invalidator"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + JitSpew(JitSpew_IonInvalidate, " invalidation thunk created at %p", (void*) code->raw()); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "Invalidator"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut) +{ + // Do not erase the frame pointer in this function. + + MacroAssembler masm(cx); + masm.pushReturnAddress(); + // Caller: + // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp + // '--- s3 ---' + + // ArgumentsRectifierReg contains the |nargs| pushed onto the current + // frame. Including |this|, there are (|nargs| + 1) arguments to copy. + MOZ_ASSERT(ArgumentsRectifierReg == s3); + + // Add |this| to the count of known arguments. + masm.addPtr(Imm32(1), ArgumentsRectifierReg); + + Register numActArgsReg = a6; + Register calleeTokenReg = a7; + Register numArgsReg = a5; + + // Load |nformals| into numArgsReg. + masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()), + calleeTokenReg); + masm.mov(calleeTokenReg, numArgsReg); + masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg); + masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()), numArgsReg); + + // Stash another copy in t3, since we are going to do destructive operations + // on numArgsReg + masm.mov(numArgsReg, t3); + + static_assert(CalleeToken_FunctionConstructing == 1, + "Ensure that we can use the constructing bit to count the value"); + masm.mov(calleeTokenReg, t2); + masm.ma_and(t2, Imm32(uint32_t(CalleeToken_FunctionConstructing))); + + // Including |this| and |new.target|, there are (|nformals| + 1 + isConstructing) + // arguments to push to the stack. Then we push a JitFrameLayout. We + // compute the padding expressed in the number of extra |undefined| values + // to push on the stack. + static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0, + "No need to consider the JitFrameLayout for aligning the stack"); + static_assert(JitStackAlignment % sizeof(Value) == 0, + "Ensure that we can pad the stack by pushing extra UndefinedValue"); + + MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment)); + masm.add32(Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */), numArgsReg); + masm.add32(t2, numArgsReg); + masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg); + + // Load the number of |undefined|s to push into t1. + masm.as_dsubu(t1, numArgsReg, s3); + + // Caller: + // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp <- t2 + // '------ s3 -------' + // + // Rectifier frame: + // [undef] [undef] [undef] [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] + // '-------- t1 ---------' '------- s3 -------' + + // Copy number of actual arguments into numActArgsReg + masm.loadPtr(Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()), + numActArgsReg); + + + masm.moveValue(UndefinedValue(), ValueOperand(t0)); + + masm.movePtr(StackPointer, t2); // Save %sp. + + // Push undefined (including the padding). + { + Label undefLoopTop; + + masm.bind(&undefLoopTop); + masm.sub32(Imm32(1), t1); + masm.subPtr(Imm32(sizeof(Value)), StackPointer); + masm.storeValue(ValueOperand(t0), Address(StackPointer, 0)); + + masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump); + } + + // Get the topmost argument.
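+ // (In effect: t1 = saved sp + sizeof(RectifierFrameLayout)
+ // + (s3 - 1) * sizeof(Value), where s3 counts |nargs| plus one for |this|;
+ // that is the address of the last, topmost argument.)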
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments"); + + // The |- sizeof(Value)| is used to put t1 such that we can read the last + // argument, and not the value which is after it. + masm.ma_dsll(t0, s3, Imm32(3)); // t0 <- nargs * 8 + masm.as_daddu(t1, t2, t0); // t1 <- t2(saved sp) + nargs * 8 + masm.addPtr(Imm32(sizeof(RectifierFrameLayout) - sizeof(Value)), t1); + + // Copy & Push arguments, |nargs| + 1 times (to include |this|). + { + Label copyLoopTop; + + masm.bind(&copyLoopTop); + masm.sub32(Imm32(1), s3); + masm.subPtr(Imm32(sizeof(Value)), StackPointer); + masm.loadValue(Address(t1, 0), ValueOperand(t0)); + masm.storeValue(ValueOperand(t0), Address(StackPointer, 0)); + masm.subPtr(Imm32(sizeof(Value)), t1); + + masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump); + } + + // If constructing, copy newTarget. + { + Label notConstructing; + + masm.branchTest32(Assembler::Zero, calleeTokenReg, Imm32(CalleeToken_FunctionConstructing), + &notConstructing); + + // thisFrame[numFormals] = prevFrame[argc] + ValueOperand newTarget(t0); + + // +1 for |this|. We want vp[argc], so don't subtract 1 + BaseIndex newTargetSrc(t2, numActArgsReg, TimesEight, sizeof(RectifierFrameLayout) + sizeof(Value)); + masm.loadValue(newTargetSrc, newTarget); + + // Again, 1 for |this| + BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value)); + masm.storeValue(newTarget, newTargetDest); + + masm.bind(&notConstructing); + } + + // Caller: + // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- t2 + // + // + // Rectifier frame: + // [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [[argc] [callee] [descr] [raddr]] + + // Construct sizeDescriptor. + masm.subPtr(StackPointer, t2); + masm.makeFrameDescriptor(t2, JitFrame_Rectifier, JitFrameLayout::Size()); + + // Construct JitFrameLayout. + masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer); + // Push actual arguments. + masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t))); + // Push callee token. + masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t))); + // Push frame descriptor. + masm.storePtr(t2, Address(StackPointer, 0)); + + // Call the target function. + // Note that this code assumes the function is JITted. + masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg); + masm.loadPtr(Address(calleeTokenReg, JSFunction::offsetOfNativeOrScript()), t1); + masm.loadBaselineOrIonRaw(t1, t1, nullptr); + uint32_t returnOffset = masm.callJitNoProfiler(t1); + + // Remove the rectifier frame. + // t2 <- descriptor with FrameType. + masm.loadPtr(Address(StackPointer, 0), t2); + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t2); // t2 <- descriptor. + + // Discard descriptor, calleeToken and number of actual arguments. + masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer); + + // Discard pushed arguments. + masm.addPtr(t2, StackPointer); + + masm.ret(); + Linker linker(masm); + AutoFlushICache afc("ArgumentsRectifier"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + + if (returnAddrOut) + *returnAddrOut = (void*) (code->raw() + returnOffset); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "ArgumentsRectifier"); +#endif + + return code; +} + +/* - When bailout is done via out-of-line code (lazy bailout), the frame + * size is stored in $ra (see CodeGeneratorMIPS64::generateOutOfLineCode()) + * and the thunk code should save it + * on the stack.
Another difference is that members snapshotOffset_ and padding_ are + * pushed to the stack by CodeGeneratorMIPS64::visitOutOfLineBailout(). Field + * frameClassId_ is forced to be NO_FRAME_SIZE_CLASS_ID + * (See: JitRuntime::generateBailoutHandler). + */ +static void +PushBailoutFrame(MacroAssembler& masm, Register spArg) +{ + // Push the frameSize_ stored in ra + // See: CodeGeneratorMIPS64::generateOutOfLineCode() + masm.push(ra); + + // Push registers such that we can access them from [base + code]. + masm.PushRegsInMask(AllRegs); + + // Put a pointer to the BailoutStack as the first argument to Bailout(). + masm.movePtr(StackPointer, spArg); +} + +static void +GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass) +{ + PushBailoutFrame(masm, a0); + + // Put pointer to BailoutInfo + static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t) * 2; + masm.subPtr(Imm32(sizeOfBailoutInfo), StackPointer); + masm.movePtr(StackPointer, a1); + + masm.setupAlignedABICall(); + masm.passABIArg(a0); + masm.passABIArg(a1); + masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout)); + + // Get BailoutInfo pointer + masm.loadPtr(Address(StackPointer, 0), a2); + + // Stack is: + // [frame] + // snapshotOffset + // frameSize + // [bailoutFrame] + // [bailoutInfo] + // + // Remove both the bailout frame and the topmost Ion frame's stack. + // Load frameSize from stack + masm.loadPtr(Address(StackPointer, + sizeOfBailoutInfo + BailoutStack::offsetOfFrameSize()), a1); + // Remove the complete BailoutStack class and the data after it + masm.addPtr(Imm32(sizeof(BailoutStack) + sizeOfBailoutInfo), StackPointer); + // Remove the frame size from the stack + masm.addPtr(a1, StackPointer); + + // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2. + JitCode* bailoutTail = cx->runtime()->jitRuntime()->getBailoutTail(); + masm.branch(bailoutTail); +} + +JitCode* +JitRuntime::generateBailoutTable(JSContext* cx, uint32_t frameClass) +{ + MOZ_CRASH("MIPS64 does not use bailout tables"); +} + +JitCode* +JitRuntime::generateBailoutHandler(JSContext* cx) +{ + MacroAssembler masm(cx); + GenerateBailoutThunk(cx, masm, NO_FRAME_SIZE_CLASS_ID); + + Linker linker(masm); + AutoFlushICache afc("BailoutHandler"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "BailoutHandler"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f) +{ + MOZ_ASSERT(functionWrappers_); + MOZ_ASSERT(functionWrappers_->initialized()); + VMWrapperMap::AddPtr p = functionWrappers_->lookupForAdd(&f); + if (p) + return p->value(); + + MacroAssembler masm(cx); + + AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask); + + static_assert((Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0, + "Wrapper register set should be a superset of Volatile register set."); + + // The context is the first argument; a0 is the first argument register. + Register cxreg = a0; + regs.take(cxreg); + + // If it isn't a tail call, then the return address needs to be saved + if (f.expectTailCall == NonTailCall) + masm.pushReturnAddress(); + + // We're aligned to an exit frame, so link it up. + masm.enterExitFrame(&f); + masm.loadJSContext(cxreg); + + // Save the base of the argument set stored on the stack. + Register argsBase = InvalidReg; + if (f.explicitArgs) { + argsBase = t1; // Use temporary register.
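+ // (argsBase will point just past the exit frame, at the first explicit
+ // argument spilled to the stack by the caller.)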
+ regs.take(argsBase); + masm.ma_daddu(argsBase, StackPointer, Imm32(ExitFrameLayout::SizeWithFooter())); + } + + // Reserve space for the outparameter. + Register outReg = InvalidReg; + switch (f.outParam) { + case Type_Value: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(Value)); + masm.movePtr(StackPointer, outReg); + break; + + case Type_Handle: + outReg = regs.takeAny(); + masm.PushEmptyRooted(f.outParamRootType); + masm.movePtr(StackPointer, outReg); + break; + + case Type_Bool: + case Type_Int32: + outReg = regs.takeAny(); + // Reserve an extra 4 bytes to keep the stack 8-byte aligned. + masm.reserveStack(2 * sizeof(int32_t)); + masm.movePtr(StackPointer, outReg); + break; + + case Type_Pointer: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(uintptr_t)); + masm.movePtr(StackPointer, outReg); + break; + + case Type_Double: + outReg = regs.takeAny(); + masm.reserveStack(sizeof(double)); + masm.movePtr(StackPointer, outReg); + break; + + default: + MOZ_ASSERT(f.outParam == Type_Void); + break; + } + + if (!generateTLEnterVM(cx, masm, f)) + return nullptr; + + masm.setupUnalignedABICall(regs.getAny()); + masm.passABIArg(cxreg); + + size_t argDisp = 0; + + // Copy any arguments. + for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) { + MoveOperand from; + switch (f.argProperties(explicitArg)) { + case VMFunction::WordByValue: + if (f.argPassedInFloatReg(explicitArg)) + masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE); + else + masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL); + argDisp += sizeof(void*); + break; + case VMFunction::WordByRef: + masm.passABIArg(MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS), + MoveOp::GENERAL); + argDisp += sizeof(void*); + break; + case VMFunction::DoubleByValue: + case VMFunction::DoubleByRef: + MOZ_CRASH("NYI: MIPS64 callVM should not be used with 128bits values."); + break; + } + } + + // Copy the implicit outparam, if any. + if (InvalidReg != outReg) + masm.passABIArg(outReg); + + masm.callWithABI(f.wrapped); + + if (!generateTLExitVM(cx, masm, f)) + return nullptr; + + // Test for failure. + switch (f.failType()) { + case Type_Object: + masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel()); + break; + case Type_Bool: + // Called functions return bools, which are 0/false and non-zero/true + masm.branchIfFalseBool(v0, masm.failureLabel()); + break; + default: + MOZ_CRASH("unknown failure kind"); + } + + // Load the outparam and free any allocated stack.
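+ // (Each case below mirrors the reservation made before the call and frees
+ // exactly what was reserved, e.g. 2 * sizeof(int32_t) for Type_Int32 and
+ // Type_Bool to keep the stack 8-byte aligned.)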
+ switch (f.outParam) { + case Type_Handle: + masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand); + break; + + case Type_Value: + masm.loadValue(Address(StackPointer, 0), JSReturnOperand); + masm.freeStack(sizeof(Value)); + break; + + case Type_Int32: + masm.load32(Address(StackPointer, 0), ReturnReg); + masm.freeStack(2 * sizeof(int32_t)); + break; + + case Type_Pointer: + masm.loadPtr(Address(StackPointer, 0), ReturnReg); + masm.freeStack(sizeof(uintptr_t)); + break; + + case Type_Bool: + masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg); + masm.freeStack(2 * sizeof(int32_t)); + break; + + case Type_Double: + if (cx->runtime()->jitSupportsFloatingPoint) { + masm.as_ld(ReturnDoubleReg, StackPointer, 0); + } else { + masm.assumeUnreachable("Unable to load into float reg, with no FP support."); + } + masm.freeStack(sizeof(double)); + break; + + default: + MOZ_ASSERT(f.outParam == Type_Void); + break; + } + + masm.leaveExitFrame(); + masm.retn(Imm32(sizeof(ExitFrameLayout) + + f.explicitStackSlots() * sizeof(void*) + + f.extraValuesToPop * sizeof(Value))); + + Linker linker(masm); + AutoFlushICache afc("VMWrapper"); + JitCode* wrapper = linker.newCode<NoGC>(cx, OTHER_CODE); + if (!wrapper) + return nullptr; + + // linker.newCode may trigger a GC and sweep functionWrappers_ so we have + // to use relookupOrAdd instead of add. + if (!functionWrappers_->relookupOrAdd(p, &f, wrapper)) + return nullptr; + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(wrapper, "VMWrapper"); +#endif + + return wrapper; +} + +JitCode* +JitRuntime::generatePreBarrier(JSContext* cx, MIRType type) +{ + MacroAssembler masm(cx); + + LiveRegisterSet save; + if (cx->runtime()->jitSupportsFloatingPoint) { + save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask), + FloatRegisterSet(FloatRegisters::VolatileMask)); + } else { + save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask), + FloatRegisterSet()); + } + save.add(ra); + masm.PushRegsInMask(save); + + MOZ_ASSERT(PreBarrierReg == a1); + masm.movePtr(ImmPtr(cx->runtime()), a0); + + masm.setupUnalignedABICall(a2); + masm.passABIArg(a0); + masm.passABIArg(a1); + masm.callWithABI(IonMarkFunction(type)); + + save.take(AnyRegister(ra)); + masm.PopRegsInMask(save); + masm.ret(); + + Linker linker(masm); + AutoFlushICache afc("PreBarrier"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "PreBarrier"); +#endif + + return code; +} + +typedef bool (*HandleDebugTrapFn)(JSContext*, BaselineFrame*, uint8_t*, bool*); +static const VMFunction HandleDebugTrapInfo = + FunctionInfo<HandleDebugTrapFn>(HandleDebugTrap, "HandleDebugTrap"); + +JitCode* +JitRuntime::generateDebugTrapHandler(JSContext* cx) +{ + MacroAssembler masm(cx); + + Register scratch1 = t0; + Register scratch2 = t1; + + // Load BaselineFrame pointer in scratch1. + masm.movePtr(s5, scratch1); + masm.subPtr(Imm32(BaselineFrame::Size()), scratch1); + + // Enter a stub frame and call the HandleDebugTrap VM function. Ensure + // the stub frame has a nullptr ICStub pointer, since this pointer is + // marked during GC. 
+ masm.movePtr(ImmPtr(nullptr), ICStubReg); + EmitBaselineEnterStubFrame(masm, scratch2); + + JitCode* code = cx->runtime()->jitRuntime()->getVMWrapper(HandleDebugTrapInfo); + if (!code) + return nullptr; + + masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer); + masm.storePtr(ra, Address(StackPointer, sizeof(uintptr_t))); + masm.storePtr(scratch1, Address(StackPointer, 0)); + + EmitBaselineCallVM(code, masm); + + EmitBaselineLeaveStubFrame(masm); + + // If the stub returns |true|, we have to perform a forced return + // (return from the JS frame). If the stub returns |false|, just return + // from the trap stub so that execution continues at the current pc. + Label forcedReturn; + masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg, &forcedReturn); + + // ra was restored by EmitBaselineLeaveStubFrame + masm.branch(ra); + + masm.bind(&forcedReturn); + masm.loadValue(Address(s5, BaselineFrame::reverseOffsetOfReturnValue()), + JSReturnOperand); + masm.movePtr(s5, StackPointer); + masm.pop(s5); + + // Before returning, if profiling is turned on, make sure that lastProfilingFrame + // is set to the correct caller frame. + { + Label skipProfilingInstrumentation; + AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled()); + masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skipProfilingInstrumentation); + masm.profilerExitFrame(); + masm.bind(&skipProfilingInstrumentation); + } + + masm.ret(); + + Linker linker(masm); + AutoFlushICache afc("DebugTrapHandler"); + JitCode* codeDbg = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(codeDbg, "DebugTrapHandler"); +#endif + + return codeDbg; +} + + +JitCode* +JitRuntime::generateExceptionTailStub(JSContext* cx, void* handler) +{ + MacroAssembler masm; + + masm.handleFailureWithHandlerTail(handler); + + Linker linker(masm); + AutoFlushICache afc("ExceptionTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "ExceptionTailStub"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateBailoutTailStub(JSContext* cx) +{ + MacroAssembler masm; + + masm.generateBailoutTail(a1, a2); + + Linker linker(masm); + AutoFlushICache afc("BailoutTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "BailoutTailStub"); +#endif + + return code; +} + +JitCode* +JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx) +{ + MacroAssembler masm; + + Register scratch1 = t0; + Register scratch2 = t1; + Register scratch3 = t2; + Register scratch4 = t3; + + // + // The code generated below expects that the current stack pointer points + // to an Ion or Baseline frame, at the state it would be immediately + // before a ret(). Thus, after this stub's business is done, it executes + // a ret() and returns directly to the caller script, on behalf of the + // callee script that jumped to this code. + // + // Thus the expected stack is: + // + // StackPointer ----+ + // v + // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr + // MEM-HI MEM-LOW + // + // + // The generated jitcode is responsible for overwriting the + // jitActivation->lastProfilingFrame field with a pointer to the previous + // Ion or Baseline jit-frame that was pushed before this one. It is also + // responsible for overwriting jitActivation->lastProfilingCallSite with + // the return address into that frame.
The frame could either be an + // immediate "caller" frame, or it could be a frame in a previous + // JitActivation (if the current frame was entered from C++, and the C++ + // was entered by some caller jit-frame further down the stack). + // + // So this jitcode is responsible for "walking up" the jit stack, finding + // the previous Ion or Baseline JS frame, and storing its address and the + // return address into the appropriate fields on the current jitActivation. + // + // There are a fixed number of different path types that can lead to the + // current frame, which is either a baseline or ion frame: + // + // <Baseline-Or-Ion> + // ^ + // | + // ^--- Ion + // | + // ^--- Baseline Stub <---- Baseline + // | + // ^--- Argument Rectifier + // | ^ + // | | + // | ^--- Ion + // | | + // | ^--- Baseline Stub <---- Baseline + // | + // ^--- Entry Frame (From C++) + // + Register actReg = scratch4; + AbsoluteAddress activationAddr(GetJitContext()->runtime->addressOfProfilingActivation()); + masm.loadPtr(activationAddr, actReg); + + Address lastProfilingFrame(actReg, JitActivation::offsetOfLastProfilingFrame()); + Address lastProfilingCallSite(actReg, JitActivation::offsetOfLastProfilingCallSite()); + +#ifdef DEBUG + // Ensure that the frame we are exiting is the current lastProfilingFrame + { + masm.loadPtr(lastProfilingFrame, scratch1); + Label checkOk; + masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk); + masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk); + masm.assumeUnreachable( + "Mismatch between stored lastProfilingFrame and current stack pointer."); + masm.bind(&checkOk); + } +#endif + + // Load the frame descriptor into |scratch1|, figure out what to do depending on its type. + masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()), scratch1); + + // Going into the conditionals, we will have: + // FrameDescriptor.size in scratch1 + // FrameDescriptor.type in scratch2 + masm.ma_and(scratch2, scratch1, Imm32((1 << FRAMETYPE_BITS) - 1)); + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1); + + // Handling of each case is dependent on FrameDescriptor.type + Label handle_IonJS; + Label handle_BaselineStub; + Label handle_Rectifier; + Label handle_IonAccessorIC; + Label handle_Entry; + Label end; + + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonJS), &handle_IonJS); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineJS), &handle_IonJS); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_BaselineStub), &handle_BaselineStub); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Rectifier), &handle_Rectifier); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_IonAccessorIC), &handle_IonAccessorIC); + masm.branch32(Assembler::Equal, scratch2, Imm32(JitFrame_Entry), &handle_Entry); + + masm.assumeUnreachable("Invalid caller frame type when exiting from Ion frame."); + + // + // JitFrame_IonJS + // + // Stack layout: + // ... + // Ion-Descriptor + // Prev-FP ---> Ion-ReturnAddr + // ... previous frame data ... |- Descriptor.Size + // ... arguments ... | + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + masm.bind(&handle_IonJS); + { + // |scratch1| contains Descriptor.size + + // We are returning directly to an IonJS frame. Store the return addr + // in lastProfilingCallSite.
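+ // (Descriptor.size is the byte size of the caller's frame, so the caller
+ // frame begins at StackPointer + Descriptor.size + JitFrameLayout::Size().)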
+ masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()), scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + // Store return frame in lastProfilingFrame. + // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size(); + masm.as_daddu(scratch2, StackPointer, scratch1); + masm.ma_daddu(scratch2, scratch2, Imm32(JitFrameLayout::Size())); + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + // + // JitFrame_BaselineStub + // + // Look past the stub and store the frame pointer to + // the baselineJS frame prior to it. + // + // Stack layout: + // ... + // BL-Descriptor + // Prev-FP ---> BL-ReturnAddr + // +-----> BL-PrevFramePointer + // | ... BL-FrameData ... + // | BLStub-Descriptor + // | BLStub-ReturnAddr + // | BLStub-StubPointer | + // +------ BLStub-SavedFramePointer |- Descriptor.Size + // ... arguments ... | + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + // We take advantage of the fact that the stub frame saves the frame + // pointer pointing to the baseline frame, so much of the calculation can + // be avoided. + // + masm.bind(&handle_BaselineStub); + { + masm.as_daddu(scratch3, StackPointer, scratch1); + Address stubFrameReturnAddr(scratch3, + JitFrameLayout::Size() + + BaselineStubFrameLayout::offsetOfReturnAddress()); + masm.loadPtr(stubFrameReturnAddr, scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + Address stubFrameSavedFramePtr(scratch3, + JitFrameLayout::Size() - (2 * sizeof(void*))); + masm.loadPtr(stubFrameSavedFramePtr, scratch2); + masm.addPtr(Imm32(sizeof(void*)), scratch2); // Skip past BL-PrevFramePtr + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + + // + // JitFrame_Rectifier + // + // The rectifier frame can be preceded by either an IonJS or a + // BaselineStub frame. + // + // Stack layout if caller of rectifier was Ion: + // + // Ion-Descriptor + // Ion-ReturnAddr + // ... ion frame data ... |- Rect-Descriptor.Size + // < COMMON LAYOUT > + // + // Stack layout if caller of rectifier was Baseline: + // + // BL-Descriptor + // Prev-FP ---> BL-ReturnAddr + // +-----> BL-SavedFramePointer + // | ... baseline frame data ... + // | BLStub-Descriptor + // | BLStub-ReturnAddr + // | BLStub-StubPointer | + // +------ BLStub-SavedFramePointer |- Rect-Descriptor.Size + // ... args to rectifier ... | + // < COMMON LAYOUT > + // + // Common stack layout: + // + // ActualArgc | + // CalleeToken |- IonRectifierFrameLayout::Size() + // Rect-Descriptor | + // Rect-ReturnAddr | + // ... rectifier data & args ... |- Descriptor.Size + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + // + masm.bind(&handle_Rectifier); + { + // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size(); + masm.as_daddu(scratch2, StackPointer, scratch1); + masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2); + masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()), scratch3); + masm.ma_dsrl(scratch1, scratch3, Imm32(FRAMESIZE_SHIFT)); + masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3); + + // Now |scratch1| contains Rect-Descriptor.Size + // and |scratch2| points to Rectifier frame + // and |scratch3| contains Rect-Descriptor.Type + + // Check for either Ion or BaselineStub frame.
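+ // (Only JitFrame_IonJS and JitFrame_BaselineStub can legitimately precede
+ // a rectifier frame; any other type trips the DEBUG-only assert below.)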
+ Label handle_Rectifier_BaselineStub; + masm.branch32(Assembler::NotEqual, scratch3, Imm32(JitFrame_IonJS), + &handle_Rectifier_BaselineStub); + + // Handle Rectifier <- IonJS + // scratch3 := RectFrame[ReturnAddr] + masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()), scratch3); + masm.storePtr(scratch3, lastProfilingCallSite); + + // scratch3 := RectFrame + Rect-Descriptor.Size + RectifierFrameLayout::Size() + masm.as_daddu(scratch3, scratch2, scratch1); + masm.addPtr(Imm32(RectifierFrameLayout::Size()), scratch3); + masm.storePtr(scratch3, lastProfilingFrame); + masm.ret(); + + // Handle Rectifier <- BaselineStub <- BaselineJS + masm.bind(&handle_Rectifier_BaselineStub); +#ifdef DEBUG + { + Label checkOk; + masm.branch32(Assembler::Equal, scratch3, Imm32(JitFrame_BaselineStub), &checkOk); + masm.assumeUnreachable("Unrecognized frame preceding baselineStub."); + masm.bind(&checkOk); + } +#endif + masm.as_daddu(scratch3, scratch2, scratch1); + Address stubFrameReturnAddr(scratch3, RectifierFrameLayout::Size() + + BaselineStubFrameLayout::offsetOfReturnAddress()); + masm.loadPtr(stubFrameReturnAddr, scratch2); + masm.storePtr(scratch2, lastProfilingCallSite); + + Address stubFrameSavedFramePtr(scratch3, + RectifierFrameLayout::Size() - (2 * sizeof(void*))); + masm.loadPtr(stubFrameSavedFramePtr, scratch2); + masm.addPtr(Imm32(sizeof(void*)), scratch2); + masm.storePtr(scratch2, lastProfilingFrame); + masm.ret(); + } + + // JitFrame_IonAccessorIC + // + // The caller is always an IonJS frame. + // + // Ion-Descriptor + // Ion-ReturnAddr + // ... ion frame data ... |- AccFrame-Descriptor.Size + // StubCode | + // AccFrame-Descriptor |- IonAccessorICFrameLayout::Size() + // AccFrame-ReturnAddr | + // ... accessor frame data & args ... |- Descriptor.Size + // ActualArgc | + // CalleeToken |- JitFrameLayout::Size() + // Descriptor | + // FP -----> ReturnAddr | + masm.bind(&handle_IonAccessorIC); + { + // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size() + masm.as_daddu(scratch2, StackPointer, scratch1); + masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2); + + // scratch3 := AccFrame-Descriptor.Size + masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfDescriptor()), scratch3); +#ifdef DEBUG + // Assert previous frame is an IonJS frame. + masm.movePtr(scratch3, scratch1); + masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1); + { + Label checkOk; + masm.branch32(Assembler::Equal, scratch1, Imm32(JitFrame_IonJS), &checkOk); + masm.assumeUnreachable("IonAccessorIC frame must be preceded by IonJS frame"); + masm.bind(&checkOk); + } +#endif + masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3); + + // lastProfilingCallSite := AccFrame-ReturnAddr + masm.loadPtr(Address(scratch2, IonAccessorICFrameLayout::offsetOfReturnAddress()), scratch1); + masm.storePtr(scratch1, lastProfilingCallSite); + + // lastProfilingFrame := AccessorFrame + AccFrame-Descriptor.Size + + // IonAccessorICFrameLayout::Size() + masm.as_daddu(scratch1, scratch2, scratch3); + masm.addPtr(Imm32(IonAccessorICFrameLayout::Size()), scratch1); + masm.storePtr(scratch1, lastProfilingFrame); + masm.ret(); + } + + // + // JitFrame_Entry + // + // If at an entry frame, store null into both fields. 
+ // + masm.bind(&handle_Entry); + { + masm.movePtr(ImmPtr(nullptr), scratch1); + masm.storePtr(scratch1, lastProfilingCallSite); + masm.storePtr(scratch1, lastProfilingFrame); + masm.ret(); + } + + Linker linker(masm); + AutoFlushICache afc("ProfilerExitFrameTailStub"); + JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE); + +#ifdef JS_ION_PERF + writePerfSpewerJitCodeProfile(code, "ProfilerExitFrameStub"); +#endif + + return code; +}