author    Jiaxun Yang <jiaxun.yang@flygoat.com>    2020-05-12 12:40:07 +0800
committer Moonchild <moonchild@palemoon.org>       2020-05-20 14:00:59 +0000
commit    3fb8648bed83c95f601d2c2e20962cbd08bc7dc7 (patch)
tree      2de25a180e7fd42a82be6aad4c8db98ae24d32cf /js/src/jit/mips64
parent    f18d78dea035c5291984d1c468336d20dc0b47b7 (diff)
Bug 1271968 - IonMonkey: MIPS: Replace long jumps by mixed jumps.
Tag: #1542
Diffstat (limited to 'js/src/jit/mips64')
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.cpp       105
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.h           4
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp    75
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.h      12
4 files changed, 54 insertions(+), 142 deletions(-)
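
For context on the terminology: the old "long jump" materializes the 64-bit target into ScratchRegister (six instructions) and jumps through it with jr/jalr plus a delay-slot nop, up to eight instructions per jump site. A "mixed jump" emits a single MIPS j/jal, whose 26-bit instruction index reaches any address in the same 256 MB region; out-of-range targets are redirected through patch stubs by the shared-code half of this patch (not shown in this diff). A minimal sketch of the region check and J-type encoding, with illustrative names (inSameJumpRegion and encodeJ are not jit identifiers):

    #include <cstdint>
    #include <cstdio>

    // MIPS primary opcodes for the two J-type jumps.
    constexpr uint32_t OP_J   = 0x02u << 26;  // j   target
    constexpr uint32_t OP_JAL = 0x03u << 26;  // jal target

    // j/jal keep bits [63:28] of (pc + 4) and replace the low 28 bits with
    // the 26-bit index shifted left by 2, so source and target must share
    // the same 256 MB region.
    bool inSameJumpRegion(uint64_t pc, uint64_t target) {
        return ((pc + 4) >> 28) == (target >> 28);
    }

    uint32_t encodeJ(uint64_t target, bool isCall) {
        return (isCall ? OP_JAL : OP_J) | (uint32_t)((target >> 2) & 0x03FFFFFFu);
    }

    int main() {
        uint64_t pc = 0x120003000, target = 0x120004800;
        if (inSameJumpRegion(pc, target)) {
            // Two instructions (j/jal + delay-slot nop) replace the old
            // eight-instruction long jump.
            printf("j encodes as 0x%08x\n", encodeJ(target, /*isCall=*/false));
        }
        return 0;
    }
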
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
index 4d251f152..6d7636309 100644
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -87,7 +87,9 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
- Instruction* inst = (Instruction*)jump_.raw();
+ Instruction* inst;
+
+ inst = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
// Six instructions used in load 64-bit imm.
MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
@@ -125,23 +127,6 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
}
}
-void
-Assembler::executableCopy(uint8_t* buffer)
-{
- MOZ_ASSERT(isFinished);
- m_buffer.executableCopy(buffer);
-
- // Patch all long jumps during code copy.
- for (size_t i = 0; i < longJumps_.length(); i++) {
- Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);
-
- uint64_t value = Assembler::ExtractLoad64Value(inst);
- Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
- }
-
- AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-}
-
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@@ -247,8 +232,12 @@ void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
int64_t offset = target - branch;
- InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // Generate the patchable mixed jump for call.
+ if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
+ addMixedJump(BufferOffset(branch), ImmPtr((void*)target));
+ return;
+ }
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
@@ -258,47 +247,14 @@ Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
return;
}
- // Generate the long jump for calls because return address has to be the
- // address after the reserved block.
- if (inst[0].encode() == inst_bgezal.encode()) {
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
- // There is 1 nop after this.
- return;
- }
-
if (BOffImm16::IsInRange(offset)) {
- // Don't skip trailing nops can improve performance
- // on Loongson3 platform.
- bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
- inst[0].encode() != inst_beq.encode());
-
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
- if (skipNops) {
- inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
- // There are 4 nops after this
- }
return;
}
- if (inst[0].encode() == inst_beq.encode()) {
- // Handle long unconditional jump.
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- } else {
- // Handle long conditional jump.
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(BufferOffset(branch + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- }
+ addMixedJump(BufferOffset(branch), ImmPtr((void*)target));
}
void
@@ -313,19 +269,16 @@ Assembler::bind(RepatchLabel* label)
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
uint64_t offset = dest.getOffset() - label->offset();
- // If first instruction is lui, then this is a long jump.
+ // If first instruction is j, then this is a mixed jump.
// If second instruction is lui, then this is a loop backedge.
- if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
- // For unconditional long branches generated by ma_liPatchable,
- // such as under:
- // jumpWithpatch
- Assembler::UpdateLoad64Value(inst, dest.getOffset());
+ if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
+ // For unconditional mixed branches generated by jumpWithPatch
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
- // branchWithCode
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
@@ -333,34 +286,25 @@ Assembler::bind(RepatchLabel* label)
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
- // Handle open long unconditional jumps created by
+ // Handle open mixed unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
- MOZ_ASSERT(inst[2].encode() == NopInst);
- MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- addLongJump(BufferOffset(label->offset()));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset());
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[0] = InstJump(op_j, JOffImm26(0)).encode();
} else {
- // Handle open long conditional jumps created by
+ // Handle open mixed conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(uint32_t)));
// No need for a "nop" here because we can clobber scratch.
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- MOZ_ASSERT(inst[6].encode() == NopInst);
- addLongJump(BufferOffset(label->offset() + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset());
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[2] = InstJump(op_j, JOffImm26(0)).encode();
}
}
label->bind(dest.getOffset());
@@ -492,8 +436,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
- InstImm* inst = (InstImm*)code;
- Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
+ Assembler::UpdateLoad64Value((Instruction*)code, (uint64_t)imm.value);
}
uint64_t
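
In the bind() hunks above, an out-of-range conditional branch becomes a mixed jump by inverting its condition, so the short branch skips over the unconditional j that follows it. A sketch of that inversion over the primary branch opcodes (the opcode values are the MIPS encodings; invertBranchOpcode is an illustrative stand-in for Assembler::invertBranch, which also fixes up the offset):

    #include <cstdint>

    // MIPS primary opcodes for the branch forms asserted on above
    // (bltz/bgez live under op_regimm and are flipped via the rt field).
    enum : uint32_t { OP_BEQ = 0x04, OP_BNE = 0x05, OP_BLEZ = 0x06, OP_BGTZ = 0x07 };

    uint32_t invertBranchOpcode(uint32_t op) {
        switch (op) {
          case OP_BEQ:  return OP_BNE;   // beq  <-> bne
          case OP_BNE:  return OP_BEQ;
          case OP_BLEZ: return OP_BGTZ;  // blez <-> bgtz
          case OP_BGTZ: return OP_BLEZ;
          default:      return op;       // anything else is left untouched
        }
    }

    int main() {
        return invertBranchOpcode(OP_BEQ) == OP_BNE ? 0 : 1;
    }
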
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
index 8a71c57bb..5cb9d1239 100644
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -153,10 +153,6 @@ class Assembler : public AssemblerMIPSShared
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
- // Copy the assembly code to the given buffer, and perform any pending
- // relocations relying on the target address.
- void executableCopy(uint8_t* buffer);
-
static uint32_t PatchWrite_NearCallSize();
static uint64_t ExtractLoad64Value(Instruction* inst0);
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index c35723d74..137a24b59 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -757,36 +757,27 @@ MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c
}
void
-MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+MacroAssemblerMIPS64::ma_jal(Label* label)
{
if (label->bound()) {
- // Generate the long jump for calls because return address has to be
- // the address after the reserved block.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jalr(ScratchRegister);
- if (delaySlotFill == FillDelaySlot)
- as_nop();
+ // Generate the mixed jump.
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_jal(JOffImm26(0));
+ as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
- // Make the whole branch continous in the buffer. The '6'
+ // Make the whole branch continuous in the buffer. The '2'
// instructions are written below (including the delay slot).
- m_buffer.ensureSpace(6 * sizeof(uint32_t));
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
- BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ BufferOffset bo = as_jal(JOffImm26(0));
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for long jump.
- as_nop();
- as_nop();
- as_nop();
- if (delaySlotFill == FillDelaySlot)
- as_nop();
}
void
@@ -810,21 +801,19 @@ MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKi
}
if (code.encode() == inst_beq.encode()) {
- // Handle long jump
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
+ // Handle mixed jump
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
// Handle a long conditional branch; its target offset is relative to
// the branch itself and points just past the nop below.
- writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
+ writeInst(invertBranch(code, BOffImm16(4 * sizeof(uint32_t))).encode());
+ as_nop();
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
@@ -834,36 +823,21 @@ MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKi
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
- if (jumpKind == ShortJump) {
- // Make the whole branch continous in the buffer.
- m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ if (jumpKind == ShortJump) {
// Indicate that this is a short jump, with offset 4.
code.setBOffImm16(BOffImm16(4));
- BufferOffset bo = writeInst(code.encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- return;
}
-
- bool conditional = code.encode() != inst_beq.encode();
-
- // Make the whole branch continous in the buffer. The '7'
- // instructions are writing at below (contain conditional nop).
- m_buffer.ensureSpace(7 * sizeof(uint32_t));
-
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for potential long jump.
- as_nop();
- as_nop();
- as_nop();
- as_nop();
- if (conditional)
+ if (jumpKind != ShortJump && code.encode() != inst_beq.encode()) {
as_nop();
+ as_nop();
+ }
}
void
@@ -1844,13 +1818,12 @@ MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentat
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
- uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
- addLongJump(bo);
- ma_liPatchable(ScratchRegister, ImmWord(dest));
- as_jr(ScratchRegister);
+ if (label->bound())
+ addMixedJump(bo, ImmPtr((void*)label->offset()), MixedJumpPatch::PATCHABLE);
+ as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}
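
Whether branchWithCode() above can keep the 2-word short form is bounded by the roughly ±128 KB reach of the 16-bit branch offset; anything further goes through the 4-word mixed-jump shape (inverted branch, nop, j, nop). A sketch of that range test (mirroring what a BOffImm16::IsInRange-style check computes; the free-standing name is ours):

    #include <cstdint>

    // 16-bit signed word offset, 4-byte aligned; the real encoder also
    // biases by 4 for the delay slot, which we ignore here.
    bool fitsShortBranch(int64_t byteOffset) {
        return (byteOffset & 0x3) == 0 &&
               byteOffset >= -32768 * 4 &&
               byteOffset <= 32767 * 4;
    }

    int main() {
        // 64 KB forward: still a short branch; ~1 MB would need a mixed jump.
        return fitsShortBranch(1 << 16) ? 0 : 1;
    }
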
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
index 6b98f1f2c..d4b850096 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -117,17 +117,17 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
- void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeDouble);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
- void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+ void ma_jal(Label* l);
// fp instructions
void ma_lid(FloatRegister dest, double value);