author     Jiaxun Yang <jiaxun.yang@flygoat.com>    2020-05-12 12:40:07 +0800
committer  Moonchild <moonchild@palemoon.org>       2020-05-20 14:00:59 +0000
commit     3fb8648bed83c95f601d2c2e20962cbd08bc7dc7 (patch)
tree       2de25a180e7fd42a82be6aad4c8db98ae24d32cf /js/src/jit
parent     f18d78dea035c5291984d1c468336d20dc0b47b7 (diff)
Bug 1271968 - IonMonkey: MIPS: Replace long jumps by mixed jumps.
Tag: #1542
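
This commit reworks how IonMonkey's MIPS backends emit branches whose
targets may not fit in a 16-bit offset. Previously each such branch
reserved a full inline block for a patchable address load plus jr (a
"long jump"), and executableCopy() rewrote those loads in place. With
this patch the inline form shrinks to a J/JAL (or a short branch) plus
its delay slot: finish() calls GenerateMixedJumps() to append one
out-of-line trampoline (patchable address load, jr, nop) for every jump
that may need a full-range or later-patchable target, and
executableCopy() calls PatchMixedJumps() to point each J either directly
at its target, when it lies in the same 256 MB region, or at its
trampoline.
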
Diffstat (limited to 'js/src/jit')
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.cpp       | 100
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.h         |  61
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp  |  18
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.h    |  22
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.cpp                 | 101
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.h                   |   4
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.cpp            |  68
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.h              |  14
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.cpp                 | 105
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.h                   |   4
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp            |  75
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.h              |  12
12 files changed, 277 insertions(+), 307 deletions(-)
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
index 58af49dda..8519bab4d 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -92,6 +92,7 @@ void
AssemblerMIPSShared::finish()
{
MOZ_ASSERT(!isFinished);
+ GenerateMixedJumps();
isFinished = true;
}
@@ -100,13 +101,21 @@ AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
{
if (!AssemblerShared::asmMergeWith(size(), other))
return false;
- for (size_t i = 0; i < other.numLongJumps(); i++) {
- size_t off = other.longJumps_[i];
- addLongJump(BufferOffset(size() + off));
- }
return m_buffer.appendBuffer(other.m_buffer);
}
+void
+AssemblerMIPSShared::executableCopy(uint8_t* buffer)
+{
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+
+ // Patch all mixed jumps during code copy.
+ PatchMixedJumps(buffer);
+
+ AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
uint32_t
AssemblerMIPSShared::actualIndex(uint32_t idx_) const
{
@@ -1662,6 +1671,89 @@ AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
return reinterpret_cast<uint8_t*>(inst->next());
}
+Instruction*
+AssemblerMIPSShared::GetInstructionImmediateFromJump(Instruction* jump)
+{
+ if (jump->extractOpcode() == ((uint32_t)op_j >> OpcodeShift) ||
+ jump->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift))
+ {
+ InstJump* j = (InstJump*) jump;
+ uintptr_t base = (uintptr_t(j) >> Imm28Bits) << Imm28Bits;
+ uint32_t index = j->extractImm26Value() << 2;
+
+ jump = (Instruction*)(base | index);
+ }
+
+ return jump;
+}
+
+void
+AssemblerMIPSShared::PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target)
+{
+ InstImm* b = (InstImm*)src;
+ uint32_t opcode = b->extractOpcode();
+ int offset;
+
+ if (mid) {
+ offset = intptr_t(mid);
+ Assembler::PatchInstructionImmediate(mid, PatchedImmPtr(target));
+ } else {
+ offset = intptr_t(target);
+ }
+
+ if (((uint32_t)op_j >> OpcodeShift) == opcode ||
+ ((uint32_t)op_jal >> OpcodeShift) == opcode)
+ {
+ InstJump* j = (InstJump*)b;
+
+ j->setJOffImm26(JOffImm26(offset));
+ } else {
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+ int i = (b[0].encode() == inst_beq.encode()) ? 0 : 2;
+
+ b[i] = InstJump(op_j, JOffImm26(offset)).encode();
+ }
+}
+
+void
+AssemblerMIPSShared::PatchMixedJumps(uint8_t* buffer)
+{
+ // Patch all mixed jumps.
+ for (size_t i = 0; i < numMixedJumps(); i++) {
+ MixedJumpPatch& mjp = mixedJump(i);
+ InstImm* b = (InstImm*)(buffer + mjp.src.getOffset());
+ uint32_t opcode = b->extractOpcode();
+ int offset;
+
+ if (mjp.mid.assigned()) {
+ offset = intptr_t(buffer) + mjp.mid.getOffset();
+ Assembler::PatchInstructionImmediate(buffer + mjp.mid.getOffset(),
+ PatchedImmPtr(buffer + uintptr_t(mjp.target)));
+ } else {
+ offset = intptr_t(buffer) + intptr_t(mjp.target);
+ }
+
+ if (((uint32_t)op_j >> OpcodeShift) == opcode ||
+ ((uint32_t)op_jal >> OpcodeShift) == opcode)
+ {
+ InstJump* j = (InstJump*)b;
+
+ j->setJOffImm26(JOffImm26(offset));
+ } else {
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (b[0].encode() == inst_beq.encode()) {
+ b[0] = InstJump(op_j, JOffImm26(offset)).encode();
+ } else {
+ b[0] = invertBranch(b[0], BOffImm16(4 * sizeof(uint32_t)));
+ b[2] = InstJump(op_j, JOffImm26(offset)).encode();
+ }
+ }
+
+ b[1].makeNop();
+ }
+}
+
// Since there are no pools in MIPS implementation, this should be simple.
Instruction*
Instruction::next()
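
Annotation: GetInstructionImmediateFromJump() above relies on the MIPS
J-type reach rule: a J/JAL can only land inside the same 2^28-byte
(256 MB) region as the jump itself, with the high bits taken from the
jump's own address. A minimal standalone model of that computation (the
names are invented for illustration; this is not code from the patch):

    #include <cstdint>

    // Resolve the target of a patched J/JAL: keep the jump's 256 MB
    // region and substitute the low 28 bits from the 26-bit word index.
    static uintptr_t JumpTargetFor(uintptr_t jumpAddr, uint32_t imm26)
    {
        const uintptr_t kRegionMask = (uintptr_t(1) << 28) - 1;
        uintptr_t base = jumpAddr & ~kRegionMask;   // high bits of the jump
        return base | (uintptr_t(imm26) << 2);      // word index -> bytes
    }
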
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
index fbf1fefdd..8fca6f5c2 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -525,21 +525,13 @@ class JOffImm26
}
int32_t decode() {
MOZ_ASSERT(!isInvalid());
- return (int32_t(data << 8) >> 6) + 4;
+ return int32_t(data << 8) >> 6;
}
explicit JOffImm26(int offset)
- : data ((offset - 4) >> 2 & Imm26Mask)
+ : data (offset >> 2 & Imm26Mask)
{
MOZ_ASSERT((offset & 0x3) == 0);
- MOZ_ASSERT(IsInRange(offset));
- }
- static bool IsInRange(int offset) {
- if ((offset - 4) < -536870912)
- return false;
- if ((offset - 4) > 536870908)
- return false;
- return true;
}
static const uint32_t INVALID = 0x20000000;
JOffImm26()
@@ -840,6 +832,26 @@ class AssemblerMIPSShared : public AssemblerShared
TestForFalse
};
+ struct MixedJumpPatch
+ {
+ enum Kind {
+ NONE,
+ PATCHABLE
+ };
+
+ BufferOffset src;
+ BufferOffset mid;
+ void* target;
+ Kind kind;
+
+ MixedJumpPatch(BufferOffset src, void* target, Kind kind)
+ : src(src),
+ mid(BufferOffset()),
+ target(target),
+ kind(kind)
+ { }
+ };
+
// :( this should be protected, but since CodeGenerator
// wants to use it, It needs to go out here :(
@@ -873,7 +885,7 @@ class AssemblerMIPSShared : public AssemblerShared
};
js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
- js::Vector<uint32_t, 8, SystemAllocPolicy> longJumps_;
+ js::Vector<MixedJumpPatch, 8, SystemAllocPolicy> mixedJumps_;
CompactBufferWriter jumpRelocations_;
CompactBufferWriter dataRelocations_;
@@ -922,7 +934,9 @@ class AssemblerMIPSShared : public AssemblerShared
public:
void finish();
bool asmMergeWith(const AssemblerMIPSShared& other);
- void executableCopy(void* buffer);
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
void copyPreBarrierTable(uint8_t* dest);
@@ -1240,16 +1254,21 @@ class AssemblerMIPSShared : public AssemblerShared
writeRelocation(src);
}
- void addLongJump(BufferOffset src) {
- enoughMemory_ &= longJumps_.append(src.getOffset());
+ void addMixedJump(BufferOffset src, ImmPtr target,
+ MixedJumpPatch::Kind kind = MixedJumpPatch::NONE)
+ {
+ enoughMemory_ &= mixedJumps_.append(MixedJumpPatch(src, target.value, kind));
}
+ virtual void GenerateMixedJumps() = 0;
+ void PatchMixedJumps(uint8_t* buffer);
+
public:
- size_t numLongJumps() const {
- return longJumps_.length();
+ size_t numMixedJumps() const {
+ return mixedJumps_.length();
}
- uint32_t longJump(size_t i) {
- return longJumps_[i];
+ MixedJumpPatch& mixedJump(size_t i) {
+ return mixedJumps_[i];
}
void flushBuffer() {
@@ -1269,6 +1288,8 @@ class AssemblerMIPSShared : public AssemblerShared
}
static uint8_t* NextInstruction(uint8_t* instruction, uint32_t* count = nullptr);
+ static Instruction* GetInstructionImmediateFromJump(Instruction* jump);
+ static void PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target);
static void ToggleToJmp(CodeLocationLabel inst_);
static void ToggleToCmp(CodeLocationLabel inst_);
@@ -1489,6 +1510,10 @@ class InstJump : public Instruction
uint32_t extractImm26Value() {
return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
}
+ void setJOffImm26(JOffImm26 off) {
+ // Reset the immediate field and replace it
+ data = (data & ~Imm26Mask) | off.encode();
+ }
};
// Class for Loongson-specific instructions
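
Annotation: with the -4 bias removed above, JOffImm26 now stores a plain
signed 26-bit word offset. A hedged standalone encode/decode pair
illustrating that contract (assuming the full signed 26-bit range; names
are invented for the example):

    #include <cassert>
    #include <cstdint>

    static uint32_t EncodeJOff26(int32_t byteOffset)
    {
        assert((byteOffset & 0x3) == 0);                  // word aligned
        return (uint32_t(byteOffset) >> 2) & 0x03FFFFFF;  // Imm26Mask
    }

    static int32_t DecodeJOff26(uint32_t imm26)
    {
        // Sign-extend the 26-bit word offset, then scale back to bytes.
        return (int32_t(imm26 << 6) >> 6) * 4;
    }

    int main()
    {
        assert(DecodeJOff26(EncodeJOff26(-8)) == -8);
        assert(DecodeJOff26(EncodeJOff26(1024)) == 1024);
        return 0;
    }
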
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
index 658cafd36..a8b53e67c 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -959,6 +959,22 @@ MacroAssemblerMIPSShared::compareFloatingPoint(FloatFormat fmt, FloatRegister lh
}
void
+MacroAssemblerMIPSShared::GenerateMixedJumps()
+{
+ // Generate all mixed jumps.
+ for (size_t i = 0; i < numMixedJumps(); i++) {
+ MixedJumpPatch& mjp = mixedJump(i);
+ if (MixedJumpPatch::NONE == mjp.kind && uintptr_t(mjp.target) <= size())
+ continue;
+ BufferOffset bo = m_buffer.nextOffset();
+ asMasm().ma_liPatchable(ScratchRegister, ImmWord(0));
+ as_jr(ScratchRegister);
+ as_nop();
+ mjp.mid = bo;
+ }
+}
+
+void
MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
DoubleCondition c)
{
@@ -1572,7 +1588,7 @@ MacroAssembler::call(Register reg)
CodeOffset
MacroAssembler::call(Label* label)
{
- ma_bal(label);
+ ma_jal(label);
return CodeOffset(currentOffset());
}
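
Annotation: each iteration of the GenerateMixedJumps() loop above emits
one out-of-line trampoline; iterations whose target already lies inside
the buffer (and is not PATCHABLE) are skipped, since the J will later be
pointed at the target directly. The mips32 shape of the emitted block,
as an illustrative listing (mips64 uses the longer 64-bit patchable
load):

    // Trampoline appended after the main code by GenerateMixedJumps():
    //   lui  $at, %hi(target)        <- ma_liPatchable(ScratchRegister, ImmWord(0))
    //   ori  $at, $at, %lo(target)
    //   jr   $at                     <- as_jr(ScratchRegister)
    //   nop                          <- as_nop(), branch delay slot
    //
    // The inline mixed jump is then just `j <trampoline>` plus a nop, so
    // the common case costs two words instead of the old reserved block.
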
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
index aea389d8a..0fa73b616 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -34,7 +34,7 @@ enum LoadStoreExtension
enum JumpKind
{
- LongJump = 0,
+ MixedJump = 0,
ShortJump = 1
};
@@ -59,6 +59,8 @@ class MacroAssemblerMIPSShared : public Assembler
DoubleCondition c, FloatTestKind* testKind,
FPConditionBit fcc = FCC0);
+ void GenerateMixedJumps();
+
public:
void ma_move(Register rd, Register rs);
@@ -147,20 +149,20 @@ class MacroAssemblerMIPSShared : public Assembler
int32_t shift, Label* negZero = nullptr);
// branches when done from within mips-specific code
- void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(lhs != ScratchRegister);
ma_li(ScratchRegister, imm);
ma_b(lhs, ScratchRegister, l, c, jumpKind);
}
template <typename T>
void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
- JumpKind jumpKind = LongJump);
+ JumpKind jumpKind = MixedJump);
- void ma_b(Label* l, JumpKind jumpKind = LongJump);
- void ma_b(wasm::TrapDesc target, JumpKind jumpKind = LongJump);
+ void ma_b(Label* l, JumpKind jumpKind = MixedJump);
+ void ma_b(wasm::TrapDesc target, JumpKind jumpKind = MixedJump);
// fp instructions
void ma_lis(FloatRegister dest, float value);
@@ -172,9 +174,9 @@ class MacroAssemblerMIPSShared : public Assembler
//FP branches
void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
- JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
- JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+ JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
void ma_call(ImmPtr dest);
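
Annotation: a usage fragment (invented names, not a complete program)
showing how the renamed default plays out for callers:

    // Default MixedJump: resolved at finalization to a short branch, a
    // direct J, or a bounce through a trampoline.
    masm.ma_b(lhs, rhs, &done, Assembler::Equal);

    // Explicit ShortJump: the caller guarantees the target is within the
    // signed 16-bit word-offset range (about +/-128 KB).
    masm.ma_b(lhs, rhs, &near, Assembler::Equal, ShortJump);
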
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
index 1b86e9d32..461f8d813 100644
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -120,8 +120,11 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
- Instruction* inst1 = (Instruction*)jump_.raw();
- Instruction* inst2 = inst1->next();
+ Instruction* inst1;
+ Instruction* inst2;
+
+ inst1 = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
+ inst2 = inst1->next();
MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
AssemblerMIPSShared::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
@@ -157,23 +160,6 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
}
}
-void
-Assembler::executableCopy(uint8_t* buffer)
-{
- MOZ_ASSERT(isFinished);
- m_buffer.executableCopy(buffer);
-
- // Patch all long jumps during code copy.
- for (size_t i = 0; i < longJumps_.length(); i++) {
- Instruction* inst1 = (Instruction*) ((uint32_t)buffer + longJumps_[i]);
-
- uint32_t value = Assembler::ExtractLuiOriValue(inst1, inst1->next());
- AssemblerMIPSShared::UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
- }
-
- AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-}
-
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@@ -314,8 +300,12 @@ void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
int32_t offset = target - branch;
- InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // Generate the patchable mixed jump for calls.
+ if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
+ addMixedJump(BufferOffset(branch), ImmPtr((void*)target));
+ return;
+ }
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
@@ -325,46 +315,13 @@ Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
return;
}
- // Generate the long jump for calls because return address has to be the
- // address after the reserved block.
- if (inst[0].encode() == inst_bgezal.encode()) {
- addLongJump(BufferOffset(branch));
- Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
- inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
- // There is 1 nop after this.
- return;
- }
-
if (BOffImm16::IsInRange(offset)) {
- bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
- inst[0].encode() != inst_beq.encode());
-
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
-
- // Skip the trailing nops in conditional branches.
- if (conditional) {
- inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*))).encode();
- // There are 2 nops after this
- }
return;
}
- if (inst[0].encode() == inst_beq.encode()) {
- // Handle long unconditional jump.
- addLongJump(BufferOffset(branch));
- Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
- inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- } else {
- // Handle long conditional jump.
- inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(BufferOffset(branch + sizeof(void*)));
- Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
- inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- }
+ addMixedJump(BufferOffset(branch), ImmPtr((const void*)target));
}
void
@@ -379,19 +336,16 @@ Assembler::bind(RepatchLabel* label)
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
uint32_t offset = dest.getOffset() - label->offset();
- // If first instruction is lui, then this is a long jump.
+ // If first instruction is j, then this is a mixed jump.
// If second instruction is lui, then this is a loop backedge.
- if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
- // For unconditional long branches generated by ma_liPatchable,
- // such as under:
- // jumpWithpatch
- AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), dest.getOffset());
+ if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
+ // For unconditional mixed branches generated by jumpWithPatch
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
- // branchWithCode
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
@@ -399,30 +353,25 @@ Assembler::bind(RepatchLabel* label)
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
- // Handle open long unconditional jumps created by
+ // Handle open mixed unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
- MOZ_ASSERT(inst[2].encode() == NopInst);
- MOZ_ASSERT(inst[3].encode() == NopInst);
- addLongJump(BufferOffset(label->offset()));
- Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, dest.getOffset());
- inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[0] = InstJump(op_j, JOffImm26(0)).encode();
} else {
- // Handle open long conditional jumps created by
+ // Handle open mixed conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+ inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(void*)));
// No need for a "nop" here because we can clobber scratch.
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- addLongJump(BufferOffset(label->offset() + sizeof(void*)));
- Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, dest.getOffset());
- inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[2] = InstJump(op_j, JOffImm26(0)).encode();
}
}
label->bind(dest.getOffset());
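
Annotation: my reading of the patched form for an open conditional mixed
jump, here and in PatchMixedJumps() (the mips64 bind() below mirrors it).
BOffImm16 measures from the branch instruction itself, so the 16-byte
offset lands just past the four-word block:

    //   [0] b<inverted cond> -> [4]    ; invertBranch(..., BOffImm16(4 * sizeof(uint32_t)))
    //   [1] nop                        ; delay slot
    //   [2] j  <target or trampoline>  ; written by PatchMixedJump(s)
    //   [3] nop                        ; delay slot
    //   [4] execution continues here when the jump is not taken
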
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
index cf7f0d228..a519b9876 100644
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -154,10 +154,6 @@ class Assembler : public AssemblerMIPSShared
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
- // Copy the assembly code to the given buffer, and perform any pending
- // relocations relying on the target address.
- void executableCopy(uint8_t* buffer);
-
static uint32_t PatchWrite_NearCallSize();
static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index d3d3a75d1..53d1a7925 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -504,16 +504,13 @@ MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c,
}
void
-MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+MacroAssemblerMIPS::ma_jal(Label* label)
{
if (label->bound()) {
- // Generate the long jump for calls because return address has to be
- // the address after the reserved block.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, Imm32(label->offset()));
- as_jalr(ScratchRegister);
- if (delaySlotFill == FillDelaySlot)
- as_nop();
+ // Generate the mixed jump.
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_jal(JOffImm26(0));
+ as_nop();
return;
}
@@ -521,16 +518,12 @@ MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
// Make the whole branch continous in the buffer.
- m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
- BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ BufferOffset bo = as_jal(JOffImm26(0));
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for long jump.
- as_nop();
- if (delaySlotFill == FillDelaySlot)
- as_nop();
}
void
@@ -554,20 +547,18 @@ MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind
}
if (code.encode() == inst_beq.encode()) {
- // Handle long jump
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, Imm32(label->offset()));
- as_jr(ScratchRegister);
+ // Handle mixed jump
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
// Handle long conditional branch
- writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, Imm32(label->offset()));
- as_jr(ScratchRegister);
+ writeInst(invertBranch(code, BOffImm16(4 * sizeof(uint32_t))).encode());
+ as_nop();
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
@@ -577,33 +568,21 @@ MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
- if (jumpKind == ShortJump) {
- // Make the whole branch continous in the buffer.
- m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ if (jumpKind == ShortJump) {
// Indicate that this is short jump with offset 4.
code.setBOffImm16(BOffImm16(4));
- BufferOffset bo = writeInst(code.encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- return;
}
-
- bool conditional = code.encode() != inst_beq.encode();
-
- // Make the whole branch continous in the buffer.
- m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
-
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for potential long jump.
- as_nop();
- as_nop();
- if (conditional)
+ if (jumpKind != ShortJump && code.encode() != inst_beq.encode()) {
as_nop();
+ as_nop();
+ }
}
void
@@ -1597,13 +1576,12 @@ MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentatio
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
- uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
- addLongJump(bo);
- ma_liPatchable(ScratchRegister, Imm32(dest));
- as_jr(ScratchRegister);
+ if (label->bound())
+ addMixedJump(bo, ImmPtr((void*)label->offset()), MixedJumpPatch::PATCHABLE);
+ as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
index fe36c66be..3b5c06399 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -91,25 +91,25 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
- void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
{
ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
}
- void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+ void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
{
ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
}
- void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeWord);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
- void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+ void ma_jal(Label* l);
// fp instructions
void ma_lid(FloatRegister dest, double value);
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
index 4d251f152..6d7636309 100644
--- a/js/src/jit/mips64/Assembler-mips64.cpp
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -87,7 +87,9 @@ js::jit::SA(FloatRegister r)
void
jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
{
- Instruction* inst = (Instruction*)jump_.raw();
+ Instruction* inst;
+
+ inst = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
// Six instructions used in load 64-bit imm.
MaybeAutoWritableJitCode awjc(inst, 6 * sizeof(uint32_t), reprotect);
@@ -125,23 +127,6 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
}
}
-void
-Assembler::executableCopy(uint8_t* buffer)
-{
- MOZ_ASSERT(isFinished);
- m_buffer.executableCopy(buffer);
-
- // Patch all long jumps during code copy.
- for (size_t i = 0; i < longJumps_.length(); i++) {
- Instruction* inst = (Instruction*) ((uintptr_t)buffer + longJumps_[i]);
-
- uint64_t value = Assembler::ExtractLoad64Value(inst);
- Assembler::UpdateLoad64Value(inst, (uint64_t)buffer + value);
- }
-
- AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-}
-
uintptr_t
Assembler::GetPointer(uint8_t* instPtr)
{
@@ -247,8 +232,12 @@ void
Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
{
int64_t offset = target - branch;
- InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
- InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ // Generate the patchable mixed jump for calls.
+ if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
+ addMixedJump(BufferOffset(branch), ImmPtr((void*)target));
+ return;
+ }
// If encoded offset is 4, then the jump must be short
if (BOffImm16(inst[0]).decode() == 4) {
@@ -258,47 +247,14 @@ Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
return;
}
- // Generate the long jump for calls because return address has to be the
- // address after the reserved block.
- if (inst[0].encode() == inst_bgezal.encode()) {
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
- // There is 1 nop after this.
- return;
- }
-
if (BOffImm16::IsInRange(offset)) {
- // Don't skip trailing nops can improve performance
- // on Loongson3 platform.
- bool skipNops = !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
- inst[0].encode() != inst_beq.encode());
-
inst[0].setBOffImm16(BOffImm16(offset));
inst[1].makeNop();
- if (skipNops) {
- inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t))).encode();
- // There are 4 nops after this
- }
return;
}
- if (inst[0].encode() == inst_beq.encode()) {
- // Handle long unconditional jump.
- addLongJump(BufferOffset(branch));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, target);
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- } else {
- // Handle long conditional jump.
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(BufferOffset(branch + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, target);
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
- // There is 1 nop after this.
- }
+ addMixedJump(BufferOffset(branch), ImmPtr((void*)target));
}
void
@@ -313,19 +269,16 @@ Assembler::bind(RepatchLabel* label)
InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
uint64_t offset = dest.getOffset() - label->offset();
- // If first instruction is lui, then this is a long jump.
+ // If first instruction is j, then this is a mixed jump.
// If second instruction is lui, then this is a loop backedge.
- if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
- // For unconditional long branches generated by ma_liPatchable,
- // such as under:
- // jumpWithpatch
- Assembler::UpdateLoad64Value(inst, dest.getOffset());
+ if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
+ // For unconditional mixed branches generated by jumpWithPatch
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
} else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
BOffImm16::IsInRange(offset))
{
// Handle code produced by:
// backedgeJump
- // branchWithCode
MOZ_ASSERT(BOffImm16::IsInRange(offset));
MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
@@ -333,34 +286,25 @@ Assembler::bind(RepatchLabel* label)
inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
inst[0].setBOffImm16(BOffImm16(offset));
} else if (inst[0].encode() == inst_beq.encode()) {
- // Handle open long unconditional jumps created by
+ // Handle open mixed unconditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
- MOZ_ASSERT(inst[2].encode() == NopInst);
- MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- addLongJump(BufferOffset(label->offset()));
- Assembler::WriteLoad64Instructions(inst, ScratchRegister, dest.getOffset());
- inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[0] = InstJump(op_j, JOffImm26(0)).encode();
} else {
- // Handle open long conditional jumps created by
+ // Handle open mixed conditional jumps created by
// MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
- inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(uint32_t)));
// No need for a "nop" here because we can clobber scratch.
- // We need to add it to long jumps array here.
+ // We need to add it to mixed jumps array here.
// See MacroAssemblerMIPS64::branchWithCode().
MOZ_ASSERT(inst[1].encode() == NopInst);
MOZ_ASSERT(inst[2].encode() == NopInst);
MOZ_ASSERT(inst[3].encode() == NopInst);
- MOZ_ASSERT(inst[4].encode() == NopInst);
- MOZ_ASSERT(inst[5].encode() == NopInst);
- MOZ_ASSERT(inst[6].encode() == NopInst);
- addLongJump(BufferOffset(label->offset() + sizeof(uint32_t)));
- Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister, dest.getOffset());
- inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+ addMixedJump(b, ImmPtr((void*)dest.getOffset()), MixedJumpPatch::PATCHABLE);
+ inst[2] = InstJump(op_j, JOffImm26(0)).encode();
}
}
label->bind(dest.getOffset());
@@ -492,8 +436,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{
- InstImm* inst = (InstImm*)code;
- Assembler::UpdateLoad64Value(inst, (uint64_t)imm.value);
+ Assembler::UpdateLoad64Value((Instruction*)code, (uint64_t)imm.value);
}
uint64_t
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
index 8a71c57bb..5cb9d1239 100644
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -153,10 +153,6 @@ class Assembler : public AssemblerMIPSShared
void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
- // Copy the assembly code to the given buffer, and perform any pending
- // relocations relying on the target address.
- void executableCopy(uint8_t* buffer);
-
static uint32_t PatchWrite_NearCallSize();
static uint64_t ExtractLoad64Value(Instruction* inst0);
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index c35723d74..137a24b59 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -757,36 +757,27 @@ MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c
}
void
-MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill)
+MacroAssemblerMIPS64::ma_jal(Label* label)
{
if (label->bound()) {
- // Generate the long jump for calls because return address has to be
- // the address after the reserved block.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jalr(ScratchRegister);
- if (delaySlotFill == FillDelaySlot)
- as_nop();
+ // Generate the mixed jump.
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_jal(JOffImm26(0));
+ as_nop();
return;
}
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
- // Make the whole branch continous in the buffer. The '6'
+ // Make the whole branch continuous in the buffer. The '2'
// instructions are writing at below (contain delay slot).
- m_buffer.ensureSpace(6 * sizeof(uint32_t));
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
- BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ BufferOffset bo = as_jal(JOffImm26(0));
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for long jump.
- as_nop();
- as_nop();
- as_nop();
- if (delaySlotFill == FillDelaySlot)
- as_nop();
}
void
@@ -810,21 +801,19 @@ MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKi
}
if (code.encode() == inst_beq.encode()) {
- // Handle long jump
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
+ // Handle mixed jump
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
// Handle long conditional branch, the target offset is based on self,
// point to next instruction of nop at below.
- writeInst(invertBranch(code, BOffImm16(7 * sizeof(uint32_t))).encode());
- // No need for a "nop" here because we can clobber scratch.
- addLongJump(nextOffset());
- ma_liPatchable(ScratchRegister, ImmWord(label->offset()));
- as_jr(ScratchRegister);
+ writeInst(invertBranch(code, BOffImm16(4 * sizeof(uint32_t))).encode());
+ as_nop();
+ addMixedJump(nextOffset(), ImmPtr((void*)label->offset()));
+ as_j(JOffImm26(0));
as_nop();
return;
}
@@ -834,36 +823,21 @@ MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label, JumpKind jumpKi
// Second word holds a pointer to the next branch in label's chain.
uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
- if (jumpKind == ShortJump) {
- // Make the whole branch continous in the buffer.
- m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ if (jumpKind == ShortJump) {
// Indicate that this is short jump with offset 4.
code.setBOffImm16(BOffImm16(4));
- BufferOffset bo = writeInst(code.encode());
- writeInst(nextInChain);
- if (!oom())
- label->use(bo.getOffset());
- return;
}
-
- bool conditional = code.encode() != inst_beq.encode();
-
- // Make the whole branch continous in the buffer. The '7'
- // instructions are writing at below (contain conditional nop).
- m_buffer.ensureSpace(7 * sizeof(uint32_t));
-
BufferOffset bo = writeInst(code.encode());
writeInst(nextInChain);
if (!oom())
label->use(bo.getOffset());
- // Leave space for potential long jump.
- as_nop();
- as_nop();
- as_nop();
- as_nop();
- if (conditional)
+ if (jumpKind != ShortJump && code.encode() != inst_beq.encode()) {
as_nop();
+ as_nop();
+ }
}
void
@@ -1844,13 +1818,12 @@ MacroAssemblerMIPS64Compat::jumpWithPatch(RepatchLabel* label, Label* documentat
{
// Only one branch per label.
MOZ_ASSERT(!label->used());
- uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
BufferOffset bo = nextOffset();
label->use(bo.getOffset());
- addLongJump(bo);
- ma_liPatchable(ScratchRegister, ImmWord(dest));
- as_jr(ScratchRegister);
+ if (label->bound())
+ addMixedJump(bo, ImmPtr((void*)label->offset()), MixedJumpPatch::PATCHABLE);
+ as_j(JOffImm26(0));
as_nop();
return CodeOffsetJump(bo.getOffset());
}
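
Annotation: tying the pieces together, the life cycle of a patchable
jump under the new scheme, as I read the mips64 code (comment-style
sketch, not code from the tree):

    // 1. jumpWithPatch() emits `j 0; nop` and, for bound labels,
    //    registers a MixedJumpPatch::PATCHABLE entry.
    // 2. finish() -> GenerateMixedJumps() appends a trampoline: a 64-bit
    //    patchable load of the target, jr, nop.
    // 3. executableCopy() -> PatchMixedJumps() points the j at the
    //    trampoline and writes the real address into the load.
    // 4. PatchJump() can later retarget the jump: it follows the j via
    //    GetInstructionImmediateFromJump() and rewrites the trampoline's
    //    64-bit immediate.
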
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
index 6b98f1f2c..d4b850096 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -117,17 +117,17 @@ class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared
void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
// branches when done from within mips-specific code
- void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
- void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
MOZ_ASSERT(rhs != ScratchRegister);
ma_load(ScratchRegister, addr, SizeDouble);
ma_b(ScratchRegister, rhs, l, c, jumpKind);
}
- void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+ void ma_jal(Label* l);
// fp instructions
void ma_lid(FloatRegister dest, double value);