author     Moonchild <moonchild@palemoon.org>    2020-06-01 21:58:35 +0000
committer  Moonchild <moonchild@palemoon.org>    2020-06-01 21:58:35 +0000
commit     c6ca4380e9e5e95df9de02daf8bfb9a6ebc22810 (patch)
tree       c7672903a2030d37f861b12900165a015f49d10a /js/src/jit/mips64
parent     451509e2c0188a4164d4b3d1d9f5839ed1e95246 (diff)
parent     744b044935f7d1d67fbe0df42d898efcbdd00536 (diff)
Merge remote-tracking branch 'origin/redwood' into release
Diffstat (limited to 'js/src/jit/mips64')
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.cpp         186
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.h            17
-rw-r--r--  js/src/jit/mips64/CodeGenerator-mips64.cpp      10
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64-inl.h   16
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp    302
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.h       38
6 files changed, 166 insertions(+), 403 deletions(-)
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
index 4d251f152..4f3eac094 100644
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -87,7 +87,9 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
- Instruction* inst = (Instruction*)jump_.raw();
+ Instruction* inst;
+
+ inst = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
// Six instructions are used to load the 64-bit imm.
MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
@@ -125,23 +127,6 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
}
}
-void
-Assembler::executableCopy(uint8_t* buffer)
-{
- MOZ_ASSERT(isFinished);
- m_buffer.executableCopy(buffer);
-
- // Patch all long jumps during code copy.
- for (size_t i = 0; i < longJumps_.length(); i++) {
- Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);
-
- uint64_t value = Assembler::ExtractLoad64Value(inst);
- Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
- }
-
- AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-}
-
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@@ -243,155 +228,6 @@ Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
}
}
-void
-Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
-{
- int64_t offset = target - branch;
- InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
-
- // If encoded offset is 4, then the jump must be short
- if (BOffImm16(inst[0]).decode() == 4) {
- MOZ_ASSERT(BOffImm16::IsInRange(offset));
- inst[0].setBOffImm16(BOffImm16(offset));
- inst[1].makeNop();
- return;
- }
-
- // Generate the long jump for calls because return address has to be the
- // address after the reserved block.
- if (inst[0].encode() == inst_bgezal.encode()) {
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
- // There is 1 nop after this.
- return;
- }
-
- if (BOffImm16::IsInRange(offset)) {
- // Not skipping trailing nops can improve performance
- // on the Loongson3 platform.
- bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
- inst[0].encode() != inst_beq.encode());
-
- inst[0].setBOffImm16(BOffImm16(offset));
- inst[1].makeNop();
-
- if (skipNops) {
- inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
- // There are 4 nops after this
- }
- return;
- }
-
- if (inst[0].encode() == inst_beq.encode()) {
- // Handle long unconditional jump.
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- } else {
- // Handle long conditional jump.
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(BufferOffset(branch + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- }
-}
-
-void
-Assembler::bind(RepatchLabel* label)
-{
- BufferOffset dest = nextOffset();
- if (label->used() && !oom()) {
- // If the label has a use, then change this use to refer to
- // the bound label;
- BufferOffset b(label->offset());
- InstImm* inst = (InstImm*)editSrc(b);
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
- uint64_t offset = dest.getOffset() - label->offset();
-
- // If first instruction is lui, then this is a long jump.
- // If second instruction is lui, then this is a loop backedge.
- if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
- // For unconditional long branches generated by ma_liPatchable,
- // such as in:
- // jumpWithPatch
- Assembler::UpdateLoad64Value(inst, dest.getOffset());
- } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
- BOffImm16::IsInRange(offset))
- {
- // Handle code produced by:
- // backedgeJump
- // branchWithCode
- MOZ_ASSERT(BOffImm16::IsInRange(offset));
- MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
- inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
- inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
- inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
- inst[0].setBOffImm16(BOffImm16(offset));
- } else if (inst[0].encode() == inst_beq.encode()) {
- // Handle open long unconditional jumps created by
- // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- // We need to add it to long jumps array here.
- // See MacroAssemblerMIPS64::branchWithCode().
- MOZ_ASSERT(inst[1].encode() == NopInst);
- MOZ_ASSERT(inst[2].encode() == NopInst);
- MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- addLongJump(BufferOffset(label->offset()));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset());
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- } else {
- // Handle open long conditional jumps created by
- // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
- // No need for a "nop" here because we can clobber scratch.
- // We need to add it to long jumps array here.
- // See MacroAssemblerMIPS64::branchWithCode().
- MOZ_ASSERT(inst[1].encode() == NopInst);
- MOZ_ASSERT(inst[2].encode() == NopInst);
- MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- MOZ_ASSERT(inst[6].encode() == NopInst);
- addLongJump(BufferOffset(label->offset() + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset());
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- }
- }
- label->bind(dest.getOffset());
-}
-
-uint32_t
-Assembler::PatchWrite_NearCallSize()
-{
- // Loading an address needs 4 instructions, plus a jump with a delay slot.
- return (4 + 2) * sizeof(uint32_t);
-}
-
-void
-Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
-{
- Instruction* inst = (Instruction*) start.raw();
- uint8_t* dest = toCall.raw();
-
- // Overwrite whatever instruction used to be here with a call.
- // Always use long jump for two reasons:
- // - Jump has to be the same size because of PatchWrite_NearCallSize.
- // - Return address has to be at the end of replaced block.
- // Short jump wouldn't be more efficient.
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
- inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
- inst[5] = InstNOP();
-
- // Ensure everyone sees the code that was just written into memory.
- AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
-}
-
uint64_t
Assembler::ExtractLoad64Value(Instruction* inst0)
{
@@ -453,19 +289,6 @@ Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value)
}
void
-Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value)
-{
- Instruction* inst1 = inst0->next();
- Instruction* inst2 = inst1->next();
- Instruction* inst3 = inst2->next();
-
- *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
- *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
- *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
- *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
-}
-
-void
Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue)
{
@@ -492,8 +315,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
- InstImm* inst = (InstImm*)code;
- Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
+ Assembler::UpdateLoad64Value((Instruction*)code, (uint64_t)imm.value);
}
uint64_t
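
Note: the removed WriteLoad64Instructions, together with the surviving ExtractLoad64Value/UpdateLoad64Value, all operate on the same patchable four-instruction sequence: a lui carrying bits 47:32 of the value, an ori carrying bits 31:16, a 16-bit rotate (the drotr32 encoding above) that moves both halves into place, and a final ori carrying bits 15:0. This is also why the new InstructionImmediateSize() in the header below reports 4 * sizeof(uint32_t). A minimal host-side sketch of the field split (illustrative names, not engine API):

    #include <cstdint>

    // The three 16-bit immediate fields carried by the patchable
    // lui/ori/drotr32/ori sequence, which ExtractLoad64Value and
    // UpdateLoad64Value read and rewrite in place (a sketch).
    struct Load64Fields {
        uint16_t hi;   // lui payload: bits 47:32 of the value
        uint16_t mid;  // first ori payload: bits 31:16
        uint16_t lo;   // final ori payload: bits 15:0
    };

    static Load64Fields SplitLoad64(uint64_t value) {
        return { uint16_t(value >> 32), uint16_t(value >> 16), uint16_t(value) };
    }
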
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
index 8a71c57bb..5ca003438 100644
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -143,28 +143,17 @@ class Assembler : public AssemblerMIPSShared
static uintptr_t GetPointer(uint8_t*);
- using AssemblerMIPSShared::bind;
-
- void bind(RepatchLabel* label);
void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
- void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
-
- // Copy the assembly code to the given buffer, and perform any pending
- // relocations relying on the target address.
- void executableCopy(uint8_t* buffer);
-
- static uint32_t PatchWrite_NearCallSize();
-
+ static uint32_t InstructionImmediateSize() {
+ return 4 * sizeof(uint32_t);
+ }
static uint64_t ExtractLoad64Value(Instruction* inst0);
static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
- static void WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value);
-
- static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
ImmPtr expectedValue);
static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
index 45f0e69d7..862960bdf 100644
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -449,10 +449,10 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
- masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_load_unaligned(mir->access(), ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
@@ -460,6 +460,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@@ -514,16 +515,17 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
- masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_store_unaligned(mir->access(), ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
}
masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
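
Note: both emitters now call masm.append(mir->access(), masm.size() - 4, masm.framePushed()) right after the plain load/store. Since every MIPS instruction is 4 bytes, masm.size() - 4 is the buffer offset of the instruction that actually touches the heap and may fault, and the frame depth lets the wasm signal handler unwind from it. A sketch of the information being recorded (the real wasm::MemoryAccess type may differ):

    #include <cstdint>

    // Illustrative shape of what each append(...) call records.
    struct MemoryAccessRecord {
        uint32_t insnOffset;   // masm.size() - 4: offset of the 4-byte
                               // instruction performing the heap access
        uint32_t framePushed;  // frame depth, for unwinding from a signal handler
    };
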
diff --git a/js/src/jit/mips64/MacroAssembler-mips64-inl.h b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
index f5737748b..e88122f57 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64-inl.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -279,8 +279,9 @@ MacroAssembler::mul64(const Operand& src, const Register64& dest, const Register
void
MacroAssembler::mulBy3(Register src, Register dest)
{
- as_daddu(dest, src, src);
- as_daddu(dest, dest, src);
+ MOZ_ASSERT(src != ScratchRegister);
+ as_daddu(ScratchRegister, src, src);
+ as_daddu(dest, ScratchRegister, src);
}
void
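
Note: the old mulBy3 broke when dest aliased src — the first daddu overwrote src, so the second computed 4*src. Routing the doubled value through ScratchRegister fixes the aliased case. A small model of the two sequences, using references to mimic register aliasing (a sketch, not engine code):

    #include <cassert>
    #include <cstdint>

    static void mulBy3Old(int64_t& dest, int64_t& src) {
        dest = src + src;   // if dest aliases src, src is clobbered here...
        dest = dest + src;  // ...so this computes 4*src, not 3*src
    }

    static void mulBy3New(int64_t& dest, const int64_t& src) {
        int64_t scratch = src + src;  // ScratchRegister = 2*src
        dest = scratch + src;         // correct even when dest aliases src
    }

    int main() {
        int64_t r = 5;
        mulBy3Old(r, r);
        assert(r == 20);  // aliased: wrong answer
        r = 5;
        mulBy3New(r, r);
        assert(r == 15);  // aliased: still 3*src
    }
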
@@ -706,8 +707,12 @@ MacroAssembler::wasmBoundsCheck(Condition cond, Register index, L label)
void
MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
{
+ Instruction* inst = (Instruction*) patchAt;
+ InstImm* i0 = (InstImm*) inst;
+ InstImm* i1 = (InstImm*) i0->next();
+
// Replace with new value
- Assembler::UpdateLoad64Value((Instruction*) patchAt, limit);
+ AssemblerMIPSShared::UpdateLuiOriValue(i0, i1, limit);
}
//}}} check_macroassembler_style
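
Note: wasmPatchBoundsCheck now assumes the bounds-check limit was emitted as a 32-bit lui/ori pair and patches it through the shared-assembler helper instead of a full 64-bit load. What rewriting such a pair amounts to, as a sketch (the real AssemblerMIPSShared::UpdateLuiOriValue patches the two instructions' immediate fields in place rather than returning them):

    #include <cstdint>

    // The emitted pair is:
    //   lui reg, limit >> 16         ; upper 16 bits
    //   ori reg, reg, limit & 0xffff ; lower 16 bits
    static void splitLuiOri(uint32_t limit, uint16_t* luiImm, uint16_t* oriImm) {
        *luiImm = uint16_t(limit >> 16);
        *oriImm = uint16_t(limit);
    }
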
@@ -721,9 +726,8 @@ inline void
MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
Register dest)
{
- loadPtr(lhs, ScratchRegister);
- movePtr(rhs, SecondScratchReg);
- cmpPtrSet(cond, ScratchRegister, SecondScratchReg, dest);
+ loadPtr(lhs, SecondScratchReg);
+ cmpPtrSet(cond, SecondScratchReg, rhs, dest);
}
template<>
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index f58184bca..424e04306 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -258,36 +258,45 @@ MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm)
{
int64_t value = imm.value;
- if (value >= INT16_MIN && value <= INT16_MAX) {
+ if (-1 == (value >> 15) || 0 == (value >> 15)) {
as_addiu(dest, zero, value);
- } else if (imm.value <= UINT16_MAX) {
- as_ori(dest, zero, Imm16::Lower(Imm32(value)).encode());
- } else if (value >= INT32_MIN && value <= INT32_MAX) {
- as_lui(dest, Imm16::Upper(Imm32(value)).encode());
- if (value & 0xffff)
- as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
- } else if (imm.value <= UINT32_MAX) {
- as_lui(dest, Imm16::Upper(Imm32(value)).encode());
- if (value & 0xffff)
- as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
+ return;
+ }
+ if (0 == (value >> 16)) {
+ as_ori(dest, zero, value);
+ return;
+ }
+
+ if (-1 == (value >> 31) || 0 == (value >> 31)) {
+ as_lui(dest, uint16_t(value >> 16));
+ } else if (0 == (value >> 32)) {
+ as_lui(dest, uint16_t(value >> 16));
+ as_dinsu(dest, zero, 32, 32);
+ } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
+ as_lui(dest, uint16_t(value >> 32));
+ if (uint16_t(value >> 16))
+ as_ori(dest, dest, uint16_t(value >> 16));
+ as_dsll(dest, dest, 16);
+ } else if (0 == (value >> 48)) {
+ as_lui(dest, uint16_t(value >> 32));
as_dinsu(dest, zero, 32, 32);
+ if (uint16_t(value >> 16))
+ as_ori(dest, dest, uint16_t(value >> 16));
+ as_dsll(dest, dest, 16);
} else {
- uint64_t high = imm.value >> 32;
-
- if (imm.value >> 48) {
- as_lui(dest, Imm16::Upper(Imm32(high)).encode());
- if (high & 0xffff)
- as_ori(dest, dest, Imm16::Lower(Imm32(high)).encode());
+ as_lui(dest, uint16_t(value >> 48));
+ if (uint16_t(value >> 32))
+ as_ori(dest, dest, uint16_t(value >> 32));
+ if (uint16_t(value >> 16)) {
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, uint16_t(value >> 16));
as_dsll(dest, dest, 16);
} else {
- as_lui(dest, Imm16::Lower(Imm32(high)).encode());
+ as_dsll32(dest, dest, 32);
}
- if ((imm.value >> 16) & 0xffff)
- as_ori(dest, dest, Imm16::Upper(Imm32(value)).encode());
- as_dsll(dest, dest, 16);
- if (value & 0xffff)
- as_ori(dest, dest, Imm16::Lower(Imm32(value)).encode());
}
+ if (uint16_t(value))
+ as_ori(dest, dest, uint16_t(value));
}
// This method generates lui, dsll and ori instruction block that can be modified
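
Note: the rewritten ma_li above selects the shortest materialization sequence by testing how far the immediate sign- or zero-extends: an arithmetic shift value >> k yielding 0 or -1 means the value is representable in the low k(+1) bits. A classifier mirroring those range tests (a sketch; the bracketed instructions are emitted only when the corresponding 16-bit field is nonzero):

    #include <cstdint>
    #include <cstdio>

    static const char* liForm(int64_t value) {
        if (value >> 15 == 0 || value >> 15 == -1) return "addiu";        // 1 insn
        if (value >> 16 == 0)                      return "ori";          // 1 insn
        if (value >> 31 == 0 || value >> 31 == -1) return "lui [+ ori]";
        if (value >> 32 == 0)                      return "lui + dinsu [+ ori]";
        if (value >> 47 == 0 || value >> 47 == -1) return "lui [+ ori] + dsll [+ ori]";
        if (value >> 48 == 0)                      return "lui + dinsu [+ ori] + dsll [+ ori]";
        return "full 64-bit form";
    }

    int main() {
        printf("%s\n", liForm(-42));                      // addiu
        printf("%s\n", liForm(0xffff));                   // ori
        printf("%s\n", liForm(-1234567));                 // lui [+ ori]
        printf("%s\n", liForm(int64_t(0x800000000000)));  // lui + dinsu [+ ori] + dsll [+ ori]
    }
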
@@ -488,7 +497,7 @@ void
MacroAssemblerMIPS64::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, L overflow)
{
// Check for signed range because of as_daddiu
- if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
+ if (Imm16::IsInSignedRange(imm.value)) {
as_daddiu(SecondScratchReg, rs, imm.value);
as_addiu(rd, rs, imm.value);
ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
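
Note: the old gate required the immediate to be in both the signed and unsigned Imm16 ranges, which collapses to [0, 32767]. Since as_daddiu/as_addiu encode a sign-extended 16-bit immediate, the signed check alone is correct, and negative immediates now take the fast path too. The encoding constraint, as a sketch:

    #include <cstdint>

    // addiu/daddiu carry a sign-extended 16-bit immediate, so the full
    // usable range is [-32768, 32767].
    static bool fitsSimm16(int64_t v) {
        return v >= INT16_MIN && v <= INT16_MAX;
    }
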
@@ -710,14 +719,8 @@ MacroAssemblerMIPS64::ma_push(Register r)
void
MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label, Condition c, JumpKind jumpKind)
{
- MOZ_ASSERT(c != Overflow);
- if (imm.value == 0) {
- if (c == Always || c == AboveOrEqual)
- ma_b(label, jumpKind);
- else if (c == Below)
- ; // This condition is always false. No branch required.
- else
- branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ if (imm.value <= INT32_MAX) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
} else {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
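
Note: ma_b(Register, ImmWord, ...) now forwards to the cheaper Imm32 overload whenever the (unsigned) immediate fits in a non-negative int32; anything wider is still materialized into ScratchRegister via ma_li. The same gate reappears in ma_cmp_set further down. A sketch of the test:

    #include <cstdint>

    // imm.value is unsigned, so this admits exactly 0..INT32_MAX.
    static bool fitsImm32FastPath(uint64_t value) {
        return value <= uint64_t(INT32_MAX);
    }
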
@@ -748,127 +751,20 @@ MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c
}
void
-MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
-{
- if (label->bound()) {
- // Generate the long jump for calls because return address has to be
- // the address after the reserved block.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jalr(ScratchRegister);
- if (delaySlotFill == FillDelaySlot)
- as_nop();
- return;
- }
-
- // Second word holds a pointer to the next branch in label's chain.
- uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
-
- // Make the whole branch continuous in the buffer. The '6'
- // instructions are written below (including the delay slot).
- m_buffer.ensureSpace(6 * sizeof(uint32_t));
-
- BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- // Leave space for long jump.
- as_nop();
- as_nop();
- as_nop();
- if (delaySlotFill == FillDelaySlot)
- as_nop();
-}
-
-void
-MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
-{
- MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
-
- if (label->bound()) {
- int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
-
- if (BOffImm16::IsInRange(offset))
- jumpKind = ShortJump;
-
- if (jumpKind == ShortJump) {
- MOZ_ASSERT(BOffImm16::IsInRange(offset));
- code.setBOffImm16(BOffImm16(offset));
- writeInst(code.encode());
- as_nop();
- return;
- }
-
- if (code.encode() == inst_beq.encode()) {
- // Handle long jump
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
- as_nop();
- return;
- }
-
- // Handle a long conditional branch. The target offset is relative to
- // this instruction and points past the nops below.
- writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
- as_nop();
- return;
- }
-
- // Generate open jump and link it to a label.
-
- // Second word holds a pointer to the next branch in label's chain.
- uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
-
- if (jumpKind == ShortJump) {
- // Make the whole branch continuous in the buffer.
- m_buffer.ensureSpace(2 * sizeof(uint32_t));
-
- // Indicate that this is a short jump with offset 4.
- code.setBOffImm16(BOffImm16(4));
- BufferOffset bo = writeInst(code.encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- return;
- }
-
- bool conditional = code.encode() != inst_beq.encode();
-
- // Make the whole branch continuous in the buffer. The '7'
- // instructions are written below (including the conditional nop).
- m_buffer.ensureSpace(7 * sizeof(uint32_t));
-
- BufferOffset bo = writeInst(code.encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- // Leave space for potential long jump.
- as_nop();
- as_nop();
- as_nop();
- as_nop();
- if (conditional)
- as_nop();
-}
-
-void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm, Condition c)
{
- ma_li(ScratchRegister, imm);
- ma_cmp_set(rd, rs, ScratchRegister, c);
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+ }
}
void
MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm, Condition c)
{
- ma_li(ScratchRegister, ImmWord(uintptr_t(imm.value)));
- ma_cmp_set(rd, rs, ScratchRegister, c);
+ ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
}
// fp instructions
@@ -877,6 +773,10 @@ MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value)
{
ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+ if (imm.value == 0) {
+ moveToDouble(zero, dest);
+ return;
+ }
ma_li(ScratchRegister, imm);
moveToDouble(ScratchRegister, dest);
}
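
Note: ma_lid short-circuits a zero immediate by moving the hardwired $zero register straight into the FPR, because +0.0 is the all-zero IEEE-754 bit pattern. The test is on the bit pattern, so -0.0 (sign bit set) still takes the ma_li path. A self-checking sketch:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
        uint64_t bits = 0;
        double d;
        std::memcpy(&d, &bits, sizeof d);
        assert(d == 0.0);  // all-zero bits are exactly +0.0
    }
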
@@ -1149,21 +1049,21 @@ MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
}
void
-MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
-
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
- as_ldl(temp, SecondScratchReg, src.offset + 7);
+ load = as_ldl(temp, SecondScratchReg, src.offset + 7);
as_ldr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_ldl(temp, ScratchRegister, 7);
+ load = as_ldl(temp, ScratchRegister, 7);
as_ldr(temp, ScratchRegister, 0);
}
-
+ append(access, load.getOffset(), asMasm().framePushed());
moveToDouble(temp, dest);
}
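
Note: the ldl/ldr pair reads the two parts of a doubleword that straddles an alignment boundary, and the patch now records the BufferOffset of the ldl — the first instruction that can fault — via append(access, ...). A portable equivalent of what the pair accomplishes, as a sketch:

    #include <cstdint>
    #include <cstring>

    // Fetch 8 bytes from a possibly-unaligned address; on MIPS, ldl and ldr
    // each load the bytes on their side of the alignment boundary.
    static uint64_t loadUnaligned64(const uint8_t* p) {
        uint64_t v;
        std::memcpy(&v, p, sizeof v);
        return v;
    }
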
@@ -1195,21 +1095,21 @@ MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest
}
void
-MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
-
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
- as_lwl(temp, SecondScratchReg, src.offset + 3);
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
as_lwr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_lwl(temp, ScratchRegister, 3);
+ load = as_lwl(temp, ScratchRegister, 3);
as_lwr(temp, ScratchRegister, 0);
}
-
+ append(access, load.getOffset(), asMasm().framePushed());
moveToFloat32(temp, dest);
}
@@ -1279,6 +1179,10 @@ MacroAssemblerMIPS64Compat::store32(Register src, const Address& address)
void
MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address)
{
+ if (src.value == 0) {
+ ma_store(zero, address, SizeWord);
+ return;
+ }
move32(src, SecondScratchReg);
ma_store(SecondScratchReg, address, SizeWord);
}
@@ -1347,39 +1251,42 @@ MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
}
void
-MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPS64Compat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromFloat32(src, temp);
-
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
- as_swl(temp, SecondScratchReg, dest.offset + 3);
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
as_swr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_swl(temp, ScratchRegister, 3);
+ store = as_swl(temp, ScratchRegister, 3);
as_swr(temp, ScratchRegister, 0);
}
+ append(access, store.getOffset(), asMasm().framePushed());
}
void
-MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPS64Compat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromDouble(src, temp);
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
- as_sdl(temp, SecondScratchReg, dest.offset + 7);
+ store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
as_sdr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_sdl(temp, ScratchRegister, 7);
+ store = as_sdl(temp, ScratchRegister, 7);
as_sdr(temp, ScratchRegister, 0);
}
+ append(access, store.getOffset(), asMasm().framePushed());
}
// Note: this function clobbers the input register.
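
Note: the store side mirrors the loads — sdl/sdr write the two parts of the doubleword, and the offset of the first instruction (the sdl) plus the current frame depth is appended as wasm memory-access metadata. The portable equivalent, as a sketch:

    #include <cstdint>
    #include <cstring>

    // Write 8 bytes to a possibly-unaligned address.
    static void storeUnaligned64(uint8_t* p, uint64_t v) {
        std::memcpy(p, &v, sizeof v);
    }
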
@@ -1455,22 +1362,52 @@ MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond, const ValueOperand&
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const ValueOperand& operand, Register dest)
{
+ Label isInt32, done;
+ Register tag = splitTagForTest(operand);
+ asMasm().branchTestInt32(Assembler::Equal, tag, &isInt32);
+
ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ jump(&done);
+
+ bind(&isInt32);
+ ma_sll(dest, operand.valueReg(), Imm32(0));
+
+ bind(&done);
}
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const Address& src, Register dest)
{
+ Label isInt32, done;
loadPtr(Address(src.base, src.offset), dest);
+ splitTag(dest, SecondScratchReg);
+ asMasm().branchTestInt32(Assembler::Equal, SecondScratchReg, &isInt32);
+
ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ jump(&done);
+
+ bind(&isInt32);
+ ma_sll(dest, dest, Imm32(0));
+
+ bind(&done);
}
void
MacroAssemblerMIPS64Compat::unboxNonDouble(const BaseIndex& src, Register dest)
{
+ Label isInt32, done;
computeScaledAddress(src, SecondScratchReg);
loadPtr(Address(SecondScratchReg, src.offset), dest);
+ splitTag(dest, SecondScratchReg);
+ asMasm().branchTestInt32(Assembler::Equal, SecondScratchReg, &isInt32);
+
ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ jump(&done);
+
+ bind(&isInt32);
+ ma_sll(dest, dest, Imm32(0));
+
+ bind(&done);
}
void
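
Note: all three unboxNonDouble overloads now branch on the tag: int32 (and boolean, handled by the same mechanism) payloads are produced as canonically sign-extended 32-bit values — on MIPS64, sll reg, reg, 0 sign-extends the low word — while other non-double payloads remain the low JSVAL_TAG_SHIFT bits extracted by ma_dext. A portable model of the two paths (kTagShift mirrors JSVAL_TAG_SHIFT, 47 on 64-bit SpiderMonkey):

    #include <cstdint>

    static const int kTagShift = 47;

    static uint64_t unboxPtrLike(uint64_t boxed) {
        return boxed & ((uint64_t(1) << kTagShift) - 1);  // ma_dext bits 0..46
    }

    static int64_t unboxInt32(uint64_t boxed) {
        return int64_t(int32_t(uint32_t(boxed)));         // sll reg, reg, 0
    }
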
@@ -1823,13 +1760,12 @@ MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentat
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
- uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
- addLongJump(bo);
- ma_liPatchable(ScratchRegister, ImmWord(dest));
- as_jr(ScratchRegister);
+ if (label->bound())
+ addMixedJump(bo, label->offset(), MixedJumpPatch::PATCHABLE);
+ as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}
@@ -1878,7 +1814,11 @@ MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg, Address d
ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
- ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(32));
+ } else {
+ ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
storePtr(SecondScratchReg, Address(dest.base, dest.offset));
}
@@ -1925,8 +1865,13 @@ void
MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload, ValueOperand dest)
{
MOZ_ASSERT(dest.valueReg() != ScratchRegister);
- if (payload != dest.valueReg())
- ma_move(dest.valueReg(), payload);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dext(dest.valueReg(), payload, Imm32(0), Imm32(32));
+ } else {
+ if (payload != dest.valueReg()) {
+ ma_move(dest.valueReg(), payload);
+ }
+ }
ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT), Imm32(64 - JSVAL_TAG_SHIFT));
}
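
Note: boxing is the inverse of the unboxing change above. Since int32/boolean payload registers may now hold sign-extended values whose upper 32 bits are all ones, inserting the full 47 payload bits would smear the sign into bits 32..46 of the boxed value; hence exactly 32 bits are extracted/inserted for those types, leaving bits 32..46 zero. A sketch with illustrative constants:

    #include <cstdint>

    static const int kTagShift = 47;  // mirrors JSVAL_TAG_SHIFT

    static uint64_t box(uint64_t tag, uint64_t payload, bool isInt32OrBool) {
        uint64_t p = isInt32OrBool
                     ? (payload & 0xffffffffu)                      // dext/dins 32 bits
                     : (payload & ((uint64_t(1) << kTagShift) - 1)); // full 47 payload bits
        return (tag << kTagShift) | p;                               // dins tag at bit 47
    }
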
@@ -2051,9 +1996,9 @@ MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)), StackPointer);
// We're going to be returning by the ion calling convention
- ma_pop(ra);
+ as_ld(ra, StackPointer, 0);
as_jr(ra);
- as_nop();
+ as_daddiu(StackPointer, StackPointer, sizeof(intptr_t)); // in delay slot.
// If we found a catch handler, this must be a baseline frame. Restore
// state and jump to the catch block.
@@ -2075,8 +2020,9 @@ MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(void* handler)
loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);
pushValue(BooleanValue(true));
- pushValue(exception);
- jump(a0);
+ as_daddiu(StackPointer, StackPointer, -sizeof(intptr_t));
+ as_jr(a0);
+ as_sd(exception.valueReg(), StackPointer, 0); // In delay slot
// Only used in debug mode. Return BaselineFrame->returnValue() to the
// caller.
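
Note: both epilogue changes exploit the MIPS branch delay slot — the instruction after a jump executes before control transfers — to fold half of a stack operation into the slot instead of spending a nop. Transcribing the emitter calls above into the instruction streams they produce (with `exception` standing for exception.valueReg()):

    ld     ra, 0(sp)         # load the return address
    jr     ra                # return...
    daddiu sp, sp, 8         # ...while the delay slot pops the stack

    daddiu sp, sp, -8        # make room for the pushed value
    jr     a0                # jump to the catch handler...
    sd     exception, 0(sp)  # ...while the delay slot completes the push
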
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
index bfe452974..b50ee7978 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -115,20 +115,17 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void ma_pop(Register r);
void ma_push(Register r);
- void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
- void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeDouble);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
- void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
-
// fp instructions
void ma_lid(FloatRegister dest, double value);
@@ -473,7 +470,12 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
ma_li(dest, Imm32(tag));
ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
- ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(dest, src, Imm32(0), Imm32(32));
+ } else {
+ ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
}
void storeValue(ValueOperand val, Operand dst);
@@ -905,7 +907,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@@ -913,7 +916,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
void store8(Register src, const Address& address);
void store8(Imm32 imm, const Address& address);
@@ -952,8 +956,10 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
- void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
- void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
void moveDouble(FloatRegister src, FloatRegister dest) {
as_movd(dest, src);
@@ -1009,12 +1015,6 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
as_nop();
}
- BufferOffset ma_BoundsCheck(Register bounded) {
- BufferOffset bo = m_buffer.nextOffset();
- ma_liPatchable(bounded, ImmWord(0));
- return bo;
- }
-
void moveFloat32(FloatRegister src, FloatRegister dest) {
as_movs(dest, src);
}