Diffstat (limited to 'js/src/jit/mips-shared')
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.cpp       | 210
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.h         |  83
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp   |  51
-rw-r--r--  js/src/jit/mips-shared/Lowering-mips-shared.cpp        |   4
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp  | 357
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.h    |  37
6 files changed, 627 insertions(+), 115 deletions(-)
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
index f813eb946..97f27c2e6 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -92,6 +92,7 @@ void
AssemblerMIPSShared::finish()
{
MOZ_ASSERT(!isFinished);
+ GenerateMixedJumps();
isFinished = true;
}
@@ -100,13 +101,26 @@ AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
{
if (!AssemblerShared::asmMergeWith(size(), other))
return false;
- for (size_t i = 0; i < other.numLongJumps(); i++) {
- size_t off = other.longJumps_[i];
- addLongJump(BufferOffset(size() + off));
+ for (size_t i = 0; i < other.numMixedJumps(); i++) {
+ const MixedJumpPatch& mjp = other.mixedJumps_[i];
+ addMixedJump(BufferOffset(size() + mjp.src.getOffset()),
+ size() + mjp.target, mjp.kind);
}
return m_buffer.appendBuffer(other.m_buffer);
}
+void
+AssemblerMIPSShared::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all mixed jumps during code copy.
+ PatchMixedJumps(buffer);
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
uint32_t
AssemblerMIPSShared::actualIndex(uint32_t idx_) const
{
@@ -1588,6 +1602,92 @@ AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
}
void
+AssemblerMIPSShared::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
+{
+ intptr_t offset = target - branch;
+
+    // Generate the patchable mixed jump for a call.
+ if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
+ addMixedJump(BufferOffset(branch), target);
+ return;
+ }
+
+    // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+ MixedJumpPatch::Kind kind = MixedJumpPatch::NONE;
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ if (inst[0].encode() != inst_beq.encode())
+ kind = MixedJumpPatch::CONDITIONAL;
+
+ addMixedJump(BufferOffset(branch), target, kind);
+}
+
+void
+AssemblerMIPSShared::bind(RepatchLabel* label)
+{
+ BufferOffset dest = nextOffset();
+ if (label->used() && !oom()) {
+        // If the label has a use, then change this use to refer to
+        // the bound label.
+ BufferOffset b(label->offset());
+ InstImm* inst = (InstImm*)editSrc(b);
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ intptr_t offset = dest.getOffset() - label->offset();
+
+        // If the first instruction is j, then this is a mixed jump.
+        // If the second instruction is lui, then this is a loop backedge.
+ if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
+ // For unconditional mixed branches generated by jumpWithPatch
+ addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+ } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
+ BOffImm16::IsInRange(offset))
+ {
+ // Handle code produced by:
+ // backedgeJump
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
+ inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ } else if (inst[0].encode() == inst_beq.encode()) {
+ // Handle open mixed unconditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+            // We need to add it to the mixed jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+ inst[0] = InstJump(op_j, JOffImm26(0)).encode();
+ } else {
+ // Handle open mixed conditional jumps created by
+ // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+ inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+            // We need to add it to the mixed jumps array here.
+ // See MacroAssemblerMIPS::branchWithCode().
+ MOZ_ASSERT(inst[1].encode() == NopInst);
+ MOZ_ASSERT(inst[2].encode() == NopInst);
+ MOZ_ASSERT(inst[3].encode() == NopInst);
+ addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+ inst[2] = InstJump(op_j, JOffImm26(0)).encode();
+ }
+ }
+ label->bind(dest.getOffset());
+}
+
+void
AssemblerMIPSShared::retarget(Label* label, Label* target)
{
if (label->used() && !oom()) {
@@ -1653,6 +1753,25 @@ AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
*(raw - 1) = imm.value;
}
+uint32_t
+AssemblerMIPSShared::PatchWrite_NearCallSize()
+{
+ return 2 * sizeof(uint32_t);
+}
+
+void
+AssemblerMIPSShared::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+ Instruction* inst = (Instruction*) start.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ inst[0] = InstJump(op_jal, JOffImm26(uintptr_t(toCall.raw())));
+ inst[1] = InstNOP();
+
+ // Ensure everyone sees the code that was just written into memory.
+ AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
+}
+
uint8_t*
AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
{
@@ -1662,6 +1781,82 @@ AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
return reinterpret_cast<uint8_t*>(inst->next());
}
+Instruction*
+AssemblerMIPSShared::GetInstructionImmediateFromJump(Instruction* jump)
+{
+ if (jump->extractOpcode() == ((uint32_t)op_j >> OpcodeShift) ||
+ jump->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift))
+ {
+ InstJump* j = (InstJump*) jump;
+ uintptr_t base = (uintptr_t(j) >> Imm28Bits) << Imm28Bits;
+ uint32_t index = j->extractImm26Value() << 2;
+
+ jump = (Instruction*)(base | index);
+ if (jump->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift))
+ jump = jump->next();
+ }
+
+ return jump;
+}
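
The base/index arithmetic above is the standard MIPS J-format rule: a j/jal target keeps the bits of the jump's own address above bit 27 and replaces the low 28 bits with the 26-bit index shifted left by two. A minimal standalone sketch of that computation, with assumed example values (JumpTarget is a hypothetical helper, shown for 32-bit addresses):

    #include <cstdint>
    #include <cstdio>

    // Keep the top 4 bits of the jump's own address and splice in the
    // 26-bit instruction index shifted left by 2.
    uint32_t JumpTarget(uint32_t jumpAddr, uint32_t imm26) {
        uint32_t base = (jumpAddr >> 28) << 28;
        return base | (imm26 << 2);
    }

    int main() {
        printf("%08x\n", JumpTarget(0x10400000u, 0x012345u)); // 10048d14
        return 0;
    }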
+
+void
+AssemblerMIPSShared::PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target)
+{
+ InstImm* b = (InstImm*)src;
+ uint32_t opcode = b->extractOpcode();
+ int offset;
+
+ if (mid) {
+ int o = 0;
+ InstImm* insn = (InstImm*)mid;
+
+ offset = intptr_t(mid);
+ if (insn->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift)) {
+ o = 1 * sizeof(uint32_t);
+ Assembler::PatchInstructionImmediate(mid + Assembler::InstructionImmediateSize() +
+ 2 * sizeof(uint32_t), PatchedImmPtr(&b[2]));
+ }
+ Assembler::PatchInstructionImmediate(mid + o, PatchedImmPtr(target));
+ } else {
+ offset = intptr_t(target);
+ }
+
+ if (((uint32_t)op_j >> OpcodeShift) == opcode ||
+ ((uint32_t)op_jal >> OpcodeShift) == opcode)
+ {
+ InstJump* j = (InstJump*)b;
+
+ j->setJOffImm26(JOffImm26(offset));
+ } else {
+ b[0] = InstJump(op_j, JOffImm26(offset)).encode();
+ }
+}
+
+void
+AssemblerMIPSShared::PatchMixedJumps(uint8_t* buffer)
+{
+ // Patch all mixed jumps.
+ for (size_t i = 0; i < numMixedJumps(); i++) {
+ MixedJumpPatch& mjp = mixedJump(i);
+ uint8_t* src = buffer + mjp.src.getOffset();
+ uint8_t* mid = nullptr;
+ uint8_t* target = buffer + mjp.target;
+ InstImm* b = (InstImm*)src;
+
+ if (mjp.mid.assigned()) {
+ mid = buffer + mjp.mid.getOffset();
+ if (MixedJumpPatch::CONDITIONAL & mjp.kind) {
+ InstImm* bc = (InstImm*)(buffer + mjp.mid.getOffset());
+ BOffImm16 offset(Assembler::InstructionImmediateSize() + 2 * sizeof(uint32_t));
+ bc[0] = invertBranch(b[0], offset);
+ }
+ }
+
+ PatchMixedJump(src, mid, target);
+ b[1].makeNop();
+ }
+}
+
// Since there are no pools in MIPS implementation, this should be simple.
Instruction*
Instruction::next()
@@ -1744,3 +1939,12 @@ AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_)
AutoFlushICache::flush(uintptr_t(inst), 4);
}
+void
+AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
+{
+ MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ ((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
+ ((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
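
The lui/ori pair that UpdateLuiOriValue patches rebuilds a full 32-bit immediate from two 16-bit halves. A minimal sketch of the arithmetic, using plain shifts in place of the real Imm16::Upper/Lower helpers:

    #include <cstdint>
    #include <cassert>

    // lui rd, upper    =>  rd = upper << 16
    // ori rd, rd, low  =>  rd = (upper << 16) | low
    uint32_t LuiOri(uint32_t value) {
        uint16_t upper = uint16_t(value >> 16);
        uint16_t lower = uint16_t(value & 0xFFFF);
        return (uint32_t(upper) << 16) | lower;
    }

    int main() {
        assert(LuiOri(0xDEADBEEFu) == 0xDEADBEEFu); // round-trips any value
        return 0;
    }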
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
index a619fa0e0..1640fdafc 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -525,21 +525,13 @@ class JOffImm26
}
int32_t decode() {
MOZ_ASSERT(!isInvalid());
- return (int32_t(data << 8) >> 6) + 4;
+ return int32_t(data << 8) >> 6;
}
explicit JOffImm26(int offset)
- : data ((offset - 4) >> 2 & Imm26Mask)
+ : data (offset >> 2 & Imm26Mask)
{
MOZ_ASSERT((offset & 0x3) == 0);
- MOZ_ASSERT(IsInRange(offset));
- }
- static bool IsInRange(int offset) {
- if ((offset - 4) < -536870912)
- return false;
- if ((offset - 4) > 536870908)
- return false;
- return true;
}
static const uint32_t INVALID = 0x20000000;
JOffImm26()
@@ -840,6 +832,27 @@ class AssemblerMIPSShared : public AssemblerShared
TestForFalse
};
+ struct MixedJumpPatch
+ {
+ enum Kind {
+ NONE,
+ PATCHABLE,
+ CONDITIONAL,
+ };
+
+ BufferOffset src;
+ BufferOffset mid;
+ uintptr_t target;
+ Kind kind;
+
+ MixedJumpPatch(BufferOffset src, uintptr_t target, Kind kind)
+ : src(src),
+ mid(BufferOffset()),
+ target(target),
+ kind(kind)
+ { }
+ };
+
// :( this should be protected, but since CodeGenerator
// wants to use it, it needs to go out here :(
@@ -873,7 +886,7 @@ class AssemblerMIPSShared : public AssemblerShared
};
js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
- js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
+ js::Vector<MixedJumpPatch, 8, SystemAllocPolicy> mixedJumps_;
CompactBufferWriter jumpRelocations_;
CompactBufferWriter dataRelocations_;
@@ -922,7 +935,9 @@ class AssemblerMIPSShared : public AssemblerShared
public:
void finish();
bool asmMergeWith(const AssemblerMIPSShared& other);
- void executableCopy(void* buffer);
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
void copyPreBarrierTable(uint8_t* dest);
@@ -1197,8 +1212,9 @@ class AssemblerMIPSShared : public AssemblerShared
// label operations
void bind(Label* label, BufferOffset boff = BufferOffset());
void bindLater(Label* label, wasm::TrapDesc target);
- virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
virtual void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) = 0;
+ void bind(RepatchLabel* label);
void bind(CodeOffset* label) {
label->bind(currentOffset());
}
@@ -1240,16 +1256,21 @@ class AssemblerMIPSShared : public AssemblerShared
writeRelocation(src);
}
- void addLongJump(BufferOffset src) {
- enoughMemory_ &= longJumps_.append(src.getOffset());
+ void addMixedJump(BufferOffset src, uintptr_t target,
+ MixedJumpPatch::Kind kind = MixedJumpPatch::NONE)
+ {
+ enoughMemory_ &= mixedJumps_.append(MixedJumpPatch(src, target, kind));
}
+ virtual void GenerateMixedJumps() = 0;
+ void PatchMixedJumps(uint8_t* buffer);
+
public:
- size_t numLongJumps() const {
- return longJumps_.length();
+ size_t numMixedJumps() const {
+ return mixedJumps_.length();
}
- uint32_t longJump(size_t i) {
- return longJumps_[i];
+ MixedJumpPatch& mixedJump(size_t i) {
+ return mixedJumps_[i];
}
void flushBuffer() {
@@ -1261,18 +1282,24 @@ class AssemblerMIPSShared : public AssemblerShared
}
static uint32_t NopSize() { return 4; }
+ static uint32_t PatchWrite_NearCallSize();
static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+ static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static uint32_t AlignDoubleArg(uint32_t offset) {
return (offset + 1U) &~ 1U;
}
static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
+ static Instruction* GetInstructionImmediateFromJump(Instruction* jump);
+ static void PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target);
static void ToggleToJmp(CodeLocationLabel inst_);
static void ToggleToCmp(CodeLocationLabel inst_);
+ static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
+
void processCodeLabels(uint8_t* rawCode);
bool bailed() {
@@ -1487,6 +1514,10 @@ class InstJump : public Instruction
uint32_t extractImm26Value() {
return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
}
+ void setJOffImm26(JOffImm26 off) {
+        // Reset the immediate field and replace it.
+ data = (data & ~Imm26Mask) | off.encode();
+ }
};
// Class for Loongson-specific instructions
@@ -1516,6 +1547,20 @@ class InstGS : public Instruction
{ }
};
+inline bool
+IsUnaligned(const wasm::MemoryAccessDesc& access)
+{
+ if (!access.align())
+ return false;
+
+#ifdef JS_CODEGEN_MIPS32
+ if (access.type() == Scalar::Int64 && access.align() >= 4)
+ return false;
+#endif
+
+ return access.align() < access.byteSize();
+}
+
} // namespace jit
} // namespace js
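
A note on the IsUnaligned() predicate added above: align() == 0 is treated as naturally aligned, and on MIPS32 an Int64 access with at least word alignment also takes the aligned path because it is lowered to two word accesses. A standalone restatement of the logic, with a hypothetical Access struct standing in for wasm::MemoryAccessDesc:

    struct Access { unsigned align; unsigned byteSize; bool isInt64; };

    bool IsUnalignedSketch(const Access& a, bool mips32) {
        if (!a.align)
            return false;                // align 0 means natural alignment
        if (mips32 && a.isInt64 && a.align >= 4)
            return false;                // lowered to two word accesses
        return a.align < a.byteSize;     // weaker than natural alignment
    }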
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
index f3c776f42..d43dbbf38 100644
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1915,16 +1915,18 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
+ FloatRegister output = ToFloatRegister(lir->output());
+
if (byteSize == 4)
- masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
+ masm.loadUnalignedFloat32(mir->access(), address, temp, output);
else
- masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
+ masm.loadUnalignedDouble(mir->access(), address, temp, output);
} else {
- masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
+ masm.ma_load_unaligned(mir->access(), ToRegister(lir->output()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@@ -1934,16 +1936,20 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
}
if (isFloat) {
- if (byteSize == 4)
- masm.loadFloat32(address, ToFloatRegister(lir->output()));
- else
- masm.loadDouble(address, ToFloatRegister(lir->output()));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (byteSize == 4) {
+ masm.loadFloat32(address, output);
+ } else {
+ masm.computeScaledAddress(address, SecondScratchReg);
+ masm.as_ld(output, SecondScratchReg, 0);
+ }
} else {
masm.ma_load(ToRegister(lir->output()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
-
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@@ -2000,16 +2006,18 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
+ FloatRegister value = ToFloatRegister(lir->value());
+
if (byteSize == 4)
- masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
+ masm.storeUnalignedFloat32(mir->access(), value, temp, address);
else
- masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
+ masm.storeUnalignedDouble(mir->access(), value, temp, address);
} else {
- masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
+ masm.ma_store_unaligned(mir->access(), ToRegister(lir->value()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@@ -2019,16 +2027,23 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
}
if (isFloat) {
+ FloatRegister value = ToFloatRegister(lir->value());
+
if (byteSize == 4) {
- masm.storeFloat32(ToFloatRegister(lir->value()), address);
- } else
- masm.storeDouble(ToFloatRegister(lir->value()), address);
+ masm.storeFloat32(value, address);
+ } else {
+            // For the time being, storeDouble on mips32 emits two store
+            // instructions, so we emit only one here to get correct behavior
+            // in the case of an OOB access.
+ masm.computeScaledAddress(address, SecondScratchReg);
+ masm.as_sd(value, SecondScratchReg, 0);
+ }
} else {
masm.ma_store(ToRegister(lir->value()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
-
+ // Only the last emitted instruction is a memory access.
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
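
In both emitWasmLoad and emitWasmStore, the offset recorded by masm.append() is masm.size() - 4 because MIPS instructions are a fixed four bytes, so that is always the buffer offset of the instruction just emitted; this is also why the code takes care that the memory access is the last instruction before the append. A one-line restatement of that assumption:

    // With fixed 4-byte MIPS instructions, the most recently emitted
    // instruction always starts at the current buffer size minus 4.
    uint32_t LastInstructionOffset(uint32_t bufferSize) { return bufferSize - 4; }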
@@ -2412,7 +2427,7 @@ CodeGeneratorMIPSShared::visitUDivOrMod(LUDivOrMod* ins)
if (ins->canBeDivideByZero()) {
if (ins->mir()->isTruncated()) {
if (ins->trapOnError()) {
- masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::InvalidConversionToInteger), Assembler::Zero);
+ masm.ma_b(rhs, rhs, trap(ins, wasm::Trap::IntegerDivideByZero), Assembler::Zero);
} else {
// Infinity|0 == 0
Label notzero;
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
index f328d16f7..8c78f56b7 100644
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -324,7 +324,7 @@ LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
LAllocation ptr = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
if (ins->access().offset())
@@ -367,7 +367,7 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
MDefinition* value = ins->value();
LAllocation baseAlloc = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
index 18997e542..0676863b9 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -39,6 +39,17 @@ MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm)
}
}
+// This method generates a lui/ori instruction pair that can be modified by
+// UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
+void
+MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm)
+{
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(imm).encode());
+ as_ori(dest, dest, Imm16::Lower(imm).encode());
+}
+
// Shifts
void
MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift)
@@ -225,8 +236,14 @@ template <typename L>
void
MacroAssemblerMIPSShared::ma_addTestCarry(Register rd, Register rs, Register rt, L overflow)
{
- as_addu(rd, rs, rt);
- as_sltu(SecondScratchReg, rd, rs);
+ if (rd != rs) {
+ as_addu(rd, rs, rt);
+ as_sltu(SecondScratchReg, rd, rs);
+ } else {
+ ma_move(SecondScratchReg, rs);
+ as_addu(rd, rs, rt);
+ as_sltu(SecondScratchReg, rd, SecondScratchReg);
+ }
ma_b(SecondScratchReg, SecondScratchReg, overflow, Assembler::NonZero);
}
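
The sltu-based check in ma_addTestCarry relies on an unsigned wraparound identity: after rd = rs + rt, a carry occurred exactly when rd < rs. When rd aliases rs, the add destroys the original rs value, which is why the patched code saves it in SecondScratchReg first. A minimal sketch of the identity:

    #include <cstdint>
    #include <cassert>

    // Unsigned add with carry detection: the sum wrapped around iff it is
    // smaller than an operand (this is what sltu computes after addu).
    bool AddCarries(uint32_t rs, uint32_t rt, uint32_t* rd) {
        *rd = rs + rt;
        return *rd < rs;
    }

    int main() {
        uint32_t r;
        assert(!AddCarries(1u, 2u, &r) && r == 3u);
        assert(AddCarries(0xFFFFFFFFu, 1u, &r) && r == 0u);
        return 0;
    }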
@@ -446,7 +463,7 @@ MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
}
void
-MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+MacroAssemblerMIPSShared::ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
@@ -460,36 +477,41 @@ MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
hiOffset = Imm16(src.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(src.offset));
- as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
+ BufferOffset load;
switch (size) {
case SizeHalfWord:
- as_lbu(dest, base, lowOffset);
if (extension != ZeroExtend)
- as_lbu(temp, base, hiOffset);
+ load = as_lbu(temp, base, hiOffset);
else
- as_lb(temp, base, hiOffset);
+ load = as_lb(temp, base, hiOffset);
+ as_lbu(dest, base, lowOffset);
as_ins(dest, temp, 8, 24);
break;
case SizeWord:
- as_lwl(dest, base, hiOffset);
+ load = as_lwl(dest, base, hiOffset);
as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
if (extension != ZeroExtend)
as_dext(dest, dest, 0, 32);
#endif
break;
+#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
- as_ldl(dest, base, hiOffset);
+ load = as_ldl(dest, base, hiOffset);
as_ldr(dest, base, lowOffset);
break;
+#endif
default:
MOZ_CRASH("Invalid argument for ma_load");
}
+
+ append(access, load.getOffset(), asMasm().framePushed());
}
void
@@ -593,13 +615,13 @@ MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
}
void
-MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+MacroAssemblerMIPSShared::ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
Register base;
- asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+ asMasm().computeScaledAddress(dest, SecondScratchReg);
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
base = SecondScratchReg;
@@ -607,29 +629,87 @@ MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& des
hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
- as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
+ BufferOffset store;
switch (size) {
case SizeHalfWord:
- as_sb(data, base, lowOffset);
as_ext(temp, data, 8, 8);
- as_sb(temp, base, hiOffset);
+ store = as_sb(temp, base, hiOffset);
+ as_sb(data, base, lowOffset);
break;
case SizeWord:
- as_swl(data, base, hiOffset);
+ store = as_swl(data, base, hiOffset);
as_swr(data, base, lowOffset);
break;
+#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
- as_sdl(data, base, hiOffset);
+ store = as_sdl(data, base, hiOffset);
as_sdr(data, base, lowOffset);
break;
+#endif
default:
MOZ_CRASH("Invalid argument for ma_store");
}
+
+ append(access, store.getOffset(), asMasm().framePushed());
+}
+
+void
+MacroAssemblerMIPSShared::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
+{
+ MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset))
+ jumpKind = ShortJump;
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ if (code.encode() == inst_beq.encode()) {
+ // Handle mixed jump
+ addMixedJump(nextOffset(), label->offset());
+ as_j(JOffImm26(0));
+ as_nop();
+ return;
+ }
+
+ // Handle long conditional branch
+ addMixedJump(nextOffset(), label->offset(), MixedJumpPatch::CONDITIONAL);
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+    // Generate a mixed jump and link it into the label's chain.
+
+    // The second word holds a pointer to the next branch in the label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch contiguous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ if (jumpKind == ShortJump) {
+        // Indicate that this is a short jump by setting the offset to 4.
+ code.setBOffImm16(BOffImm16(4));
+ }
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
}
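
For unbound labels, branchWithCode emits two words: the branch itself and, in the delay-slot position, the buffer offset of the label's previous use. The uses thus form a linked list threaded through the code buffer, which bind() later walks. A sketch of that chaining over a hypothetical buffer-of-words model (not the real AssemblerBuffer API):

    #include <cstdint>
    #include <vector>

    constexpr uint32_t INVALID_OFFSET = UINT32_MAX;

    struct Label { uint32_t head = INVALID_OFFSET; };

    // Each use appends the branch word plus a link to the previous use;
    // the label's head always points at the most recent use.
    void EmitUse(std::vector<uint32_t>& buf, Label& l, uint32_t branchWord) {
        uint32_t off = uint32_t(buf.size());
        buf.push_back(branchWord);
        buf.push_back(l.head);
        l.head = off;
    }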
// Branches when done from within mips-specific code.
@@ -639,7 +719,7 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Conditi
switch (c) {
case Equal :
case NotEqual:
- asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+ branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
break;
case Always:
ma_b(label, jumpKind);
@@ -649,11 +729,11 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label, Conditi
case Signed:
case NotSigned:
MOZ_ASSERT(lhs == rhs);
- asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ branchWithCode(getBranchCode(lhs, c), label, jumpKind);
break;
default:
Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
- asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+ branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
break;
}
}
@@ -668,11 +748,19 @@ MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label, Condition
else if (c == Below)
; // This condition is always false. No branch required.
else
- asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ branchWithCode(getBranchCode(lhs, c), label, jumpKind);
} else {
- MOZ_ASSERT(lhs != ScratchRegister);
- ma_li(ScratchRegister, imm);
- ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+ }
}
}
@@ -705,7 +793,7 @@ template void MacroAssemblerMIPSShared::ma_b<ImmTag>(Register lhs, ImmTag rhs,
void
MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind)
{
- asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+ branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
}
void
@@ -716,6 +804,30 @@ MacroAssemblerMIPSShared::ma_b(wasm::TrapDesc target, JumpKind jumpKind)
bindLater(&label, target);
}
+void
+MacroAssemblerMIPSShared::ma_jal(Label* label)
+{
+ if (label->bound()) {
+ // Generate the mixed jump.
+ addMixedJump(nextOffset(), label->offset());
+ as_jal(JOffImm26(0));
+ as_nop();
+ return;
+ }
+
+    // The second word holds a pointer to the next branch in the label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    // Make the whole branch contiguous in the buffer. The two words
+    // written below are the jal and the chain link in its delay slot.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+ BufferOffset bo = as_jal(JOffImm26(0));
+ writeInst(nextInChain);
+ if (!oom())
+ label->use(bo.getOffset());
+}
+
Assembler::Condition
MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
{
@@ -768,20 +880,59 @@ MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Register rhs, C
// beq at,$zero,offs
as_slt(scratch, rhs, lhs);
return Equal;
- case Equal :
- case NotEqual:
- case Zero:
- case NonZero:
- case Always:
- case Signed:
- case NotSigned:
- MOZ_CRASH("There is a better way to compare for equality.");
- break;
- case Overflow:
- MOZ_CRASH("Overflow condition not supported for MIPS.");
- break;
default:
- MOZ_CRASH("Invalid condition for branch.");
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+Assembler::Condition
+MacroAssemblerMIPSShared::ma_cmp(Register scratch, Register lhs, Imm32 imm, Condition c)
+{
+ switch (c) {
+ case Above:
+ case BelowOrEqual:
+ if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
+ // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+ as_sltiu(scratch, lhs, imm.value + 1);
+
+ return (c == BelowOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(scratch, scratch, lhs);
+ return (c == BelowOrEqual ? Equal : NotEqual);
+ }
+ case AboveOrEqual:
+ case Below:
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_sltiu(scratch, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(scratch, lhs, scratch);
+ }
+ return (c == AboveOrEqual ? Equal : NotEqual);
+ case GreaterThan:
+ case LessThanOrEqual:
+ if (Imm16::IsInSignedRange(imm.value + 1)) {
+ // lhs <= rhs via lhs < rhs + 1.
+ as_slti(scratch, lhs, imm.value + 1);
+ return (c == LessThanOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(scratch, scratch, lhs);
+ return (c == LessThanOrEqual ? Equal : NotEqual);
+ }
+ case GreaterThanOrEqual:
+ case LessThan:
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_slti(scratch, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(scratch, lhs, scratch);
+ }
+ return (c == GreaterThanOrEqual ? Equal : NotEqual);
+ default:
+ MOZ_CRASH("Invalid condition.");
}
return Always;
}
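
Several cases above fold "lhs <= imm" into a single slti/sltiu by rewriting it as "lhs < imm + 1", valid whenever imm + 1 does not overflow (hence the imm.value != -1 guard in the unsigned case). A minimal check of the signed identity:

    #include <cstdint>
    #include <cassert>

    // lhs <= imm  <=>  lhs < imm + 1, provided imm + 1 does not overflow.
    bool LessOrEqualViaSlti(int32_t lhs, int32_t imm) {
        assert(imm != INT32_MAX);
        return lhs < imm + 1; // what as_slti(scratch, lhs, imm + 1) tests
    }

    int main() {
        assert(LessOrEqualViaSlti(5, 5));
        assert(!LessOrEqualViaSlti(6, 5));
        return 0;
    }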
@@ -853,22 +1004,21 @@ MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Cond
case Zero:
MOZ_ASSERT(rs == rt);
// seq d,s,$zero =>
- // xor d,s,$zero
- // sltiu d,d,1
- as_xor(rd, rs, zero);
- as_sltiu(rd, rd, 1);
+ // sltiu d,s,1
+ as_sltiu(rd, rs, 1);
break;
case NonZero:
+ MOZ_ASSERT(rs == rt);
// sne d,s,$zero =>
- // xor d,s,$zero
- // sltu d,$zero,d
- as_xor(rd, rs, zero);
- as_sltu(rd, zero, rd);
+ // sltu d,$zero,s
+ as_sltu(rd, zero, rs);
break;
case Signed:
+ MOZ_ASSERT(rs == rt);
as_slt(rd, rs, zero);
break;
case NotSigned:
+ MOZ_ASSERT(rs == rt);
// sge d,s,$zero =>
// slt d,s,$zero
// xori d,d,1
@@ -876,7 +1026,7 @@ MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt, Cond
as_xori(rd, rd, 1);
break;
default:
- MOZ_CRASH("Invalid condition for ma_cmp_set.");
+ MOZ_CRASH("Invalid condition.");
}
}
@@ -948,42 +1098,127 @@ MacroAssemblerMIPSShared::compareFloatingPoint(FloatFormat fmt, FloatRegister lh
}
void
+MacroAssemblerMIPSShared::GenerateMixedJumps()
+{
+ // Generate all mixed jumps.
+ for (size_t i = 0; i < numMixedJumps(); i++) {
+ MixedJumpPatch& mjp = mixedJump(i);
+ if (MixedJumpPatch::NONE == mjp.kind && mjp.target <= size())
+ continue;
+ BufferOffset bo = m_buffer.nextOffset();
+ if (MixedJumpPatch::CONDITIONAL & mjp.kind) {
+ // Leave space for conditional branch.
+ as_nop();
+ asMasm().ma_liPatchable(ScratchRegister, ImmWord(0));
+ as_jr(ScratchRegister);
+ }
+ asMasm().ma_liPatchable(ScratchRegister, ImmWord(0));
+ as_jr(ScratchRegister);
+ as_nop();
+ mjp.mid = bo;
+ }
+}
+
+void
MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c)
{
- ma_li(dest, Imm32(0));
- ma_li(ScratchRegister, Imm32(1));
-
FloatTestKind moveCondition;
compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+ ma_li(dest, Imm32(1));
+
if (moveCondition == TestForTrue)
- as_movt(dest, ScratchRegister);
+ as_movf(dest, zero);
else
- as_movf(dest, ScratchRegister);
+ as_movt(dest, zero);
}
void
MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c)
{
- ma_li(dest, Imm32(0));
- ma_li(ScratchRegister, Imm32(1));
-
FloatTestKind moveCondition;
compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+ ma_li(dest, Imm32(1));
+
if (moveCondition == TestForTrue)
- as_movt(dest, ScratchRegister);
+ as_movf(dest, zero);
else
- as_movf(dest, ScratchRegister);
+ as_movt(dest, zero);
}
void
MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
{
- ma_li(ScratchRegister, imm);
- ma_cmp_set(rd, rs, ScratchRegister, c);
+ if (imm.value == 0) {
+ switch (c) {
+ case Equal :
+ case BelowOrEqual:
+ as_sltiu(rd, rs, 1);
+ break;
+ case NotEqual:
+ case Above:
+ as_sltu(rd, zero, rs);
+ break;
+ case AboveOrEqual:
+ case Below:
+        as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+ break;
+ case GreaterThan:
+ case LessThanOrEqual:
+ as_slt(rd, zero, rs);
+ if (c == LessThanOrEqual)
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ case GreaterThanOrEqual:
+ as_slt(rd, rs, zero);
+ if (c == GreaterThanOrEqual)
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ as_sltiu(rd, rs, 1);
+ break;
+ case NonZero:
+ as_sltu(rd, zero, rs);
+ break;
+ case Signed:
+ as_slt(rd, rs, zero);
+ break;
+ case NotSigned:
+ as_slt(rd, rs, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return;
+ }
+
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(rs != ScratchRegister);
+ ma_xor(rd, rs, imm);
+ if (c == Equal)
+ as_sltiu(rd, rd, 1);
+ else
+ as_sltu(rd, zero, rd);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("Invalid condition.");
+ default:
+ Condition cond = ma_cmp(rd, rs, imm, c);
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+        if (cond == Equal)
+ as_xori(rd, rd, 1);
+ }
}
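
The Equal/NotEqual path above uses the xor idiom: rs == imm exactly when rs ^ imm is zero, and sltiu rd, rd, 1 converts "is zero" into a 0/1 value, while sltu rd, zero, rd converts "is nonzero". In C terms:

    #include <cstdint>

    // sltiu rd, rd, 1 after the xor: 1 iff rs == imm.
    uint32_t CmpSetEqual(uint32_t rs, uint32_t imm) {
        uint32_t x = rs ^ imm;
        return x < 1u;
    }

    // sltu rd, zero, rd after the xor: 1 iff rs != imm.
    uint32_t CmpSetNotEqual(uint32_t rs, uint32_t imm) {
        uint32_t x = rs ^ imm;
        return 0u < x;
    }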
// fp instructions
@@ -1071,7 +1306,7 @@ MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* l
{
FloatTestKind testKind;
compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
- asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+ branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
void
@@ -1080,7 +1315,7 @@ MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* l
{
FloatTestKind testKind;
compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
- asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+ branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
void
@@ -1561,7 +1796,7 @@ MacroAssembler::call(Register reg)
CodeOffset
MacroAssembler::call(Label* label)
{
- ma_bal(label);
+ ma_jal(label);
return CodeOffset(currentOffset());
}
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
index c9bd4a4d9..4f5dcd309 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -34,7 +34,7 @@ enum LoadStoreExtension
enum JumpKind
{
- LongJump = 0,
+ MixedJump = 0,
ShortJump = 1
};
@@ -54,17 +54,21 @@ class MacroAssemblerMIPSShared : public Assembler
const MacroAssembler& asMasm() const;
Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c, FloatTestKind* testKind,
FPConditionBit fcc = FCC0);
+ void GenerateMixedJumps();
+
public:
void ma_move(Register rd, Register rs);
void ma_li(Register dest, ImmGCPtr ptr);
void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
// Shift operations
void ma_sll(Register rd, Register rt, Imm32 shift);
@@ -104,7 +108,7 @@ class MacroAssemblerMIPSShared : public Assembler
// load
void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
- void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// store
@@ -112,7 +116,7 @@ class MacroAssemblerMIPSShared : public Assembler
LoadStoreExtension extension = SignExtend);
void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
- void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// arithmetic based ops
@@ -145,21 +149,24 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
int32_t shift, Label* negZero = nullptr);
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
- void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, l, c, jumpKind);
}
template <typename T>
void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
- JumpKind jumpKind = LongJump);
+ JumpKind jumpKind = MixedJump);
+
+ void ma_b(Label* l, JumpKind jumpKind = MixedJump);
+ void ma_b(wasm::TrapDesc target, JumpKind jumpKind = MixedJump);
- void ma_b(Label* l, JumpKind jumpKind = LongJump);
- void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
+ void ma_jal(Label* l);
// fp instructions
void ma_lis(FloatRegister dest, float value);
@@ -171,9 +178,9 @@ class MacroAssemblerMIPSShared : public Assembler
//FP branches
void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
- JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
- JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_call(ImmPtr dest);
@@ -184,6 +191,12 @@ class MacroAssemblerMIPSShared : public Assembler
void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+ BufferOffset ma_BoundsCheck(Register bounded) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(bounded, Imm32(0));
+ return bo;
+ }
+
void moveToDoubleLo(Register src, FloatRegister dest) {
as_mtc1(src, dest);
}