Diffstat (limited to 'js/src/jit/mips-shared/Assembler-mips-shared.cpp')
-rw-r--r--   js/src/jit/mips-shared/Assembler-mips-shared.cpp   210
1 file changed, 207 insertions, 3 deletions
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
index f813eb946..97f27c2e6 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -92,6 +92,7 @@ void
 AssemblerMIPSShared::finish()
 {
     MOZ_ASSERT(!isFinished);
+    GenerateMixedJumps();
     isFinished = true;
 }
 
@@ -100,13 +101,26 @@ AssemblerMIPSShared::asmMergeWith(const AssemblerMIPSShared& other)
 {
     if (!AssemblerShared::asmMergeWith(size(), other))
         return false;
-    for (size_t i = 0; i < other.numLongJumps(); i++) {
-        size_t off = other.longJumps_[i];
-        addLongJump(BufferOffset(size() + off));
+    for (size_t i = 0; i < other.numMixedJumps(); i++) {
+        const MixedJumpPatch& mjp = other.mixedJumps_[i];
+        addMixedJump(BufferOffset(size() + mjp.src.getOffset()),
+                     size() + mjp.target, mjp.kind);
     }
     return m_buffer.appendBuffer(other.m_buffer);
 }
 
+void
+AssemblerMIPSShared::executableCopy(uint8_t* buffer)
+{
+    MOZ_ASSERT(isFinished);
+    m_buffer.executableCopy(buffer);
+
+    // Patch all mixed jumps during code copy.
+    PatchMixedJumps(buffer);
+
+    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
+}
+
 uint32_t
 AssemblerMIPSShared::actualIndex(uint32_t idx_) const
 {
@@ -1588,6 +1602,92 @@ AssemblerMIPSShared::bindLater(Label* label, wasm::TrapDesc target)
 }
 
 void
+AssemblerMIPSShared::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
+{
+    intptr_t offset = target - branch;
+
+    // Generate the patchable mixed jump for call.
+    if (inst->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift)) {
+        addMixedJump(BufferOffset(branch), target);
+        return;
+    }
+
+    // If encoded offset is 4, then the jump must be short
+    if (BOffImm16(inst[0]).decode() == 4) {
+        MOZ_ASSERT(BOffImm16::IsInRange(offset));
+        inst[0].setBOffImm16(BOffImm16(offset));
+        inst[1].makeNop();
+        return;
+    }
+
+    if (BOffImm16::IsInRange(offset)) {
+        inst[0].setBOffImm16(BOffImm16(offset));
+        inst[1].makeNop();
+        return;
+    }
+
+    MixedJumpPatch::Kind kind = MixedJumpPatch::NONE;
+    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+    if (inst[0].encode() != inst_beq.encode())
+        kind = MixedJumpPatch::CONDITIONAL;
+
+    addMixedJump(BufferOffset(branch), target, kind);
+}
+
+void
+AssemblerMIPSShared::bind(RepatchLabel* label)
+{
+    BufferOffset dest = nextOffset();
+    if (label->used() && !oom()) {
+        // If the label has a use, then change this use to refer to
+        // the bound label;
+        BufferOffset b(label->offset());
+        InstImm* inst = (InstImm*)editSrc(b);
+        InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+        intptr_t offset = dest.getOffset() - label->offset();
+
+        // If first instruction is j, then this is a mixed jump.
+        // If second instruction is lui, then this is a loop backedge.
+        if (inst[0].extractOpcode() == (uint32_t(op_j) >> OpcodeShift)) {
+            // For unconditional mixed branches generated by jumpWithPatch
+            addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+        } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
+                   BOffImm16::IsInRange(offset))
+        {
+            // Handle code produced by:
+            //     backedgeJump
+            MOZ_ASSERT(BOffImm16::IsInRange(offset));
+            MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
+                       inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
+                       inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
+                       inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
+            inst[0].setBOffImm16(BOffImm16(offset));
+        } else if (inst[0].encode() == inst_beq.encode()) {
+            // Handle open mixed unconditional jumps created by
+            // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+            // We need to add it to mixed jumps array here.
+            // See MacroAssemblerMIPS::branchWithCode().
+            MOZ_ASSERT(inst[1].encode() == NopInst);
+            addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+            inst[0] = InstJump(op_j, JOffImm26(0)).encode();
+        } else {
+            // Handle open mixed conditional jumps created by
+            // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
+            inst[0] = invertBranch(inst[0], BOffImm16(4 * sizeof(uint32_t)));
+            // No need for a "nop" here because we can clobber scratch.
+            // We need to add it to mixed jumps array here.
+            // See MacroAssemblerMIPS::branchWithCode().
+            MOZ_ASSERT(inst[1].encode() == NopInst);
+            MOZ_ASSERT(inst[2].encode() == NopInst);
+            MOZ_ASSERT(inst[3].encode() == NopInst);
+            addMixedJump(b, dest.getOffset(), MixedJumpPatch::PATCHABLE);
+            inst[2] = InstJump(op_j, JOffImm26(0)).encode();
+        }
+    }
+    label->bind(dest.getOffset());
+}
+
+void
 AssemblerMIPSShared::retarget(Label* label, Label* target)
 {
     if (label->used() && !oom()) {
@@ -1653,6 +1753,25 @@ AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm)
     *(raw - 1) = imm.value;
 }
 
+uint32_t
+AssemblerMIPSShared::PatchWrite_NearCallSize()
+{
+    return 2 * sizeof(uint32_t);
+}
+
+void
+AssemblerMIPSShared::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
+{
+    Instruction* inst = (Instruction*) start.raw();
+
+    // Overwrite whatever instruction used to be here with a call.
+    inst[0] = InstJump(op_jal, JOffImm26(uintptr_t(toCall.raw())));
+    inst[1] = InstNOP();
+
+    // Ensure everyone sees the code that was just written into memory.
+    AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
+}
+
 uint8_t*
 AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
 {
@@ -1662,6 +1781,82 @@ AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count)
     return reinterpret_cast<uint8_t*>(inst->next());
 }
 
+Instruction*
+AssemblerMIPSShared::GetInstructionImmediateFromJump(Instruction* jump)
+{
+    if (jump->extractOpcode() == ((uint32_t)op_j >> OpcodeShift) ||
+        jump->extractOpcode() == ((uint32_t)op_jal >> OpcodeShift))
+    {
+        InstJump* j = (InstJump*) jump;
+        uintptr_t base = (uintptr_t(j) >> Imm28Bits) << Imm28Bits;
+        uint32_t index = j->extractImm26Value() << 2;
+
+        jump = (Instruction*)(base | index);
+        if (jump->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift))
+            jump = jump->next();
+    }
+
+    return jump;
+}
+
+void
+AssemblerMIPSShared::PatchMixedJump(uint8_t* src, uint8_t* mid, uint8_t* target)
+{
+    InstImm* b = (InstImm*)src;
+    uint32_t opcode = b->extractOpcode();
+    int offset;
+
+    if (mid) {
+        int o = 0;
+        InstImm* insn = (InstImm*)mid;
+
+        offset = intptr_t(mid);
+        if (insn->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift)) {
+            o = 1 * sizeof(uint32_t);
+            Assembler::PatchInstructionImmediate(mid + Assembler::InstructionImmediateSize() +
+                                                 2 * sizeof(uint32_t), PatchedImmPtr(&b[2]));
+        }
+        Assembler::PatchInstructionImmediate(mid + o, PatchedImmPtr(target));
+    } else {
+        offset = intptr_t(target);
+    }
+
+    if (((uint32_t)op_j >> OpcodeShift) == opcode ||
+        ((uint32_t)op_jal >> OpcodeShift) == opcode)
+    {
+        InstJump* j = (InstJump*)b;
+
+        j->setJOffImm26(JOffImm26(offset));
+    } else {
+        b[0] = InstJump(op_j, JOffImm26(offset)).encode();
+    }
+}
+
+void
+AssemblerMIPSShared::PatchMixedJumps(uint8_t* buffer)
+{
+    // Patch all mixed jumps.
+    for (size_t i = 0; i < numMixedJumps(); i++) {
+        MixedJumpPatch& mjp = mixedJump(i);
+        uint8_t* src = buffer + mjp.src.getOffset();
+        uint8_t* mid = nullptr;
+        uint8_t* target = buffer + mjp.target;
+        InstImm* b = (InstImm*)src;
+
+        if (mjp.mid.assigned()) {
+            mid = buffer + mjp.mid.getOffset();
+            if (MixedJumpPatch::CONDITIONAL & mjp.kind) {
+                InstImm* bc = (InstImm*)(buffer + mjp.mid.getOffset());
+                BOffImm16 offset(Assembler::InstructionImmediateSize() + 2 * sizeof(uint32_t));
+                bc[0] = invertBranch(b[0], offset);
+            }
+        }
+
+        PatchMixedJump(src, mid, target);
+        b[1].makeNop();
+    }
+}
+
 // Since there are no pools in MIPS implementation, this should be simple.
 Instruction*
 Instruction::next()
 {
@@ -1744,3 +1939,12 @@ AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_)
 
     AutoFlushICache::flush(uintptr_t(inst), 4);
 }
+void
+AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
+{
+    MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+    MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+    ((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
+    ((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
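
Note on the lui/ori patching added above: UpdateLuiOriValue rewrites a lui/ori pair so that the lui immediate carries the upper 16 bits of the 32-bit value and the ori immediate carries the lower 16 bits. The following minimal standalone C++ sketch only illustrates that split; it is not part of the patch, and SplitLuiOri is a hypothetical helper name.

    #include <cstdint>
    #include <cstdio>

    // Mirror of what UpdateLuiOriValue encodes: "lui rt, hi" puts the upper
    // 16 bits into the top half of rt, and "ori rt, rt, lo" fills in the
    // lower 16 bits, so the pair materializes the full 32-bit constant.
    static void SplitLuiOri(uint32_t value, uint16_t* hi, uint16_t* lo)
    {
        *hi = uint16_t(value >> 16);      // immediate for lui (cf. Imm16::Upper)
        *lo = uint16_t(value & 0xffff);   // immediate for ori (cf. Imm16::Lower)
    }

    int main()
    {
        uint16_t hi, lo;
        SplitLuiOri(0x12345678u, &hi, &lo);
        std::printf("lui imm=0x%04x, ori imm=0x%04x\n", hi, lo);  // 0x1234, 0x5678
        return 0;
    }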