Diffstat (limited to 'js/src/jit/mips32')
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.cpp        | 199
-rw-r--r--  js/src/jit/mips32/Assembler-mips32.h          |  19
-rw-r--r--  js/src/jit/mips32/CodeGenerator-mips32.cpp    |  45
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32-inl.h |   7
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.cpp   | 194
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.h     |  54
6 files changed, 108 insertions(+), 410 deletions(-)
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
index 6283c1d5a..f1fb71609 100644
--- a/js/src/jit/mips32/Assembler-mips32.cpp
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -120,11 +120,14 @@ js::jit::SA(FloatRegister r)
 void
 jit::PatchJump(CodeLocationJump& jump_, CodeLocationLabel label, ReprotectCode reprotect)
 {
-    Instruction* inst1 = (Instruction*)jump_.raw();
-    Instruction* inst2 = inst1->next();
+    Instruction* inst1;
+    Instruction* inst2;
+
+    inst1 = AssemblerMIPSShared::GetInstructionImmediateFromJump((Instruction*)jump_.raw());
+    inst2 = inst1->next();
 
     MaybeAutoWritableJitCode awjc(inst1, 8, reprotect);
-    Assembler::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
+    AssemblerMIPSShared::UpdateLuiOriValue(inst1, inst2, (uint32_t)label.raw());
 
     AutoFlushICache::flush(uintptr_t(inst1), 8);
 }
@@ -146,34 +149,17 @@ jit::PatchBackedge(CodeLocationJump& jump, CodeLocationLabel label,
     } else {
         if (target == JitRuntime::BackedgeLoopHeader) {
             Instruction* lui = &branch[1];
-            Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+            AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
             // Jump to ori. The lui will be executed in delay slot.
             branch->setBOffImm16(BOffImm16(2 * sizeof(uint32_t)));
         } else {
             Instruction* lui = &branch[4];
-            Assembler::UpdateLuiOriValue(lui, lui->next(), targetAddr);
+            AssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), targetAddr);
             branch->setBOffImm16(BOffImm16(4 * sizeof(uint32_t)));
         }
     }
 }
 
-void
-Assembler::executableCopy(uint8_t* buffer)
-{
-    MOZ_ASSERT(isFinished);
-    m_buffer.executableCopy(buffer);
-
-    // Patch all long jumps during code copy.
-    for (size_t i = 0; i < longJumps_.length(); i++) {
-        Instruction* inst1 = (Instruction*) ((uint32_t)buffer + longJumps_[i]);
-
-        uint32_t value = Assembler::ExtractLuiOriValue(inst1, inst1->next());
-        Assembler::UpdateLuiOriValue(inst1, inst1->next(), (uint32_t)buffer + value);
-    }
-
-    AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size());
-}
-
 uintptr_t
 Assembler::GetPointer(uint8_t* instPtr)
 {
@@ -207,7 +193,7 @@ TraceOneDataRelocation(JSTracer* trc, Instruction* inst)
     TraceManuallyBarrieredGenericPointerEdge(trc, reinterpret_cast<gc::Cell**>(&ptr),
                                              "ion-masm-ptr");
     if (ptr != prior) {
-        Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
+        AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
         AutoFlushICache::flush(uintptr_t(inst), 8);
     }
 }
@@ -306,153 +292,10 @@ Assembler::Bind(uint8_t* rawCode, CodeOffset* label, const void* address)
     if (label->bound()) {
         intptr_t offset = label->offset();
         Instruction* inst = (Instruction*) (rawCode + offset);
-        Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
-    }
-}
-
-void
-Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target)
-{
-    int32_t offset = target - branch;
-    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
-    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
-
-    // If encoded offset is 4, then the jump must be short
-    if (BOffImm16(inst[0]).decode() == 4) {
-        MOZ_ASSERT(BOffImm16::IsInRange(offset));
-        inst[0].setBOffImm16(BOffImm16(offset));
-        inst[1].makeNop();
-        return;
-    }
-
-    // Generate the long jump for calls because return address has to be the
-    // address after the reserved block.
-    if (inst[0].encode() == inst_bgezal.encode()) {
-        addLongJump(BufferOffset(branch));
-        Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
-        inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
-        // There is 1 nop after this.
-        return;
-    }
-
-    if (BOffImm16::IsInRange(offset)) {
-        bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
-                            inst[0].encode() != inst_beq.encode());
-
-        inst[0].setBOffImm16(BOffImm16(offset));
-        inst[1].makeNop();
-
-        // Skip the trailing nops in conditional branches.
-        if (conditional) {
-            inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*))).encode();
-            // There are 2 nops after this
-        }
-        return;
-    }
-
-    if (inst[0].encode() == inst_beq.encode()) {
-        // Handle long unconditional jump.
-        addLongJump(BufferOffset(branch));
-        Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
-        inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
-        // There is 1 nop after this.
-    } else {
-        // Handle long conditional jump.
-        inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
-        // No need for a "nop" here because we can clobber scratch.
-        addLongJump(BufferOffset(branch + sizeof(void*)));
-        Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, target);
-        inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
-        // There is 1 nop after this.
+        AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), (uint32_t)address);
     }
 }
 
-void
-Assembler::bind(RepatchLabel* label)
-{
-    BufferOffset dest = nextOffset();
-    if (label->used() && !oom()) {
-        // If the label has a use, then change this use to refer to
-        // the bound label;
-        BufferOffset b(label->offset());
-        InstImm* inst = (InstImm*)editSrc(b);
-        InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
-        uint32_t offset = dest.getOffset() - label->offset();
-
-        // If first instruction is lui, then this is a long jump.
-        // If second instruction is lui, then this is a loop backedge.
-        if (inst[0].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift)) {
-            // For unconditional long branches generated by ma_liPatchable,
-            // such as under:
-            //     jumpWithpatch
-            Assembler::UpdateLuiOriValue(inst, inst->next(), dest.getOffset());
-        } else if (inst[1].extractOpcode() == (uint32_t(op_lui) >> OpcodeShift) ||
-                   BOffImm16::IsInRange(offset))
-        {
-            // Handle code produced by:
-            //     backedgeJump
-            //     branchWithCode
-            MOZ_ASSERT(BOffImm16::IsInRange(offset));
-            MOZ_ASSERT(inst[0].extractOpcode() == (uint32_t(op_beq) >> OpcodeShift) ||
-                       inst[0].extractOpcode() == (uint32_t(op_bne) >> OpcodeShift) ||
-                       inst[0].extractOpcode() == (uint32_t(op_blez) >> OpcodeShift) ||
-                       inst[0].extractOpcode() == (uint32_t(op_bgtz) >> OpcodeShift));
-            inst[0].setBOffImm16(BOffImm16(offset));
-        } else if (inst[0].encode() == inst_beq.encode()) {
-            // Handle open long unconditional jumps created by
-            // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
-            // We need to add it to long jumps array here.
-            // See MacroAssemblerMIPS::branchWithCode().
-            MOZ_ASSERT(inst[1].encode() == NopInst);
-            MOZ_ASSERT(inst[2].encode() == NopInst);
-            MOZ_ASSERT(inst[3].encode() == NopInst);
-            addLongJump(BufferOffset(label->offset()));
-            Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, dest.getOffset());
-            inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
-        } else {
-            // Handle open long conditional jumps created by
-            // MacroAssemblerMIPSShared::ma_b(..., wasm::Trap, ...).
-            inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
-            // No need for a "nop" here because we can clobber scratch.
-            // We need to add it to long jumps array here.
-            // See MacroAssemblerMIPS::branchWithCode().
-            MOZ_ASSERT(inst[1].encode() == NopInst);
-            MOZ_ASSERT(inst[2].encode() == NopInst);
-            MOZ_ASSERT(inst[3].encode() == NopInst);
-            MOZ_ASSERT(inst[4].encode() == NopInst);
-            addLongJump(BufferOffset(label->offset() + sizeof(void*)));
-            Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister, dest.getOffset());
-            inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
-        }
-    }
-    label->bind(dest.getOffset());
-}
-
-uint32_t
-Assembler::PatchWrite_NearCallSize()
-{
-    return 4 * sizeof(uint32_t);
-}
-
-void
-Assembler::PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall)
-{
-    Instruction* inst = (Instruction*) start.raw();
-    uint8_t* dest = toCall.raw();
-
-    // Overwrite whatever instruction used to be here with a call.
-    // Always use long jump for two reasons:
-    // - Jump has to be the same size because of PatchWrite_NearCallSize.
-    // - Return address has to be at the end of replaced block.
-    // Short jump wouldn't be more efficient.
-    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister, (uint32_t)dest);
-    inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
-    inst[3] = InstNOP();
-
-    // Ensure everyone sees the code that was just written into memory.
-    AutoFlushICache::flush(uintptr_t(inst), PatchWrite_NearCallSize());
-}
-
 uint32_t
 Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
 {
@@ -467,24 +310,6 @@ Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1)
 }
 
 void
-Assembler::UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value)
-{
-    MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
-    MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
-
-    ((InstImm*) inst0)->setImm16(Imm16::Upper(Imm32(value)));
-    ((InstImm*) inst1)->setImm16(Imm16::Lower(Imm32(value)));
-}
-
-void
-Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
-                                   Register reg, uint32_t value)
-{
-    *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
-    *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
-}
-
-void
 Assembler::PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                    ImmPtr expectedValue)
 {
@@ -503,7 +328,7 @@ Assembler::PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newVal
     MOZ_ASSERT(value == uint32_t(expectedValue.value));
 
     // Replace with new value
-    Assembler::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
+    AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(newValue.value));
 
     AutoFlushICache::flush(uintptr_t(inst), 8);
 }
@@ -512,7 +337,7 @@ void
 Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
 {
     InstImm* inst = (InstImm*)code;
-    Assembler::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
+    AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), (uint32_t)imm.value);
 }
 
 uint32_t
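Note on the scheme above: every patchable 32-bit immediate on MIPS32 is materialized as a lui (upper 16 bits) followed by an ori (lower 16 bits), so reading or repointing it only ever touches the two imm16 fields, and each patch site flushes exactly those 8 bytes. A minimal standalone sketch of the helper pair this commit moves to AssemblerMIPSShared (hypothetical raw-word form; the real code goes through InstImm and Imm16::Upper/Imm16::Lower):

    // MIPS I-type instructions keep their 16-bit immediate in the low half
    // of the instruction word, so patching is two masked stores.
    static void UpdateLuiOri(uint32_t* lui, uint32_t* ori, uint32_t value) {
        *lui = (*lui & 0xffff0000) | (value >> 16);     // upper half into lui
        *ori = (*ori & 0xffff0000) | (value & 0xffff);  // lower half into ori
    }

    // The inverse, mirroring ExtractLuiOriValue above.
    static uint32_t ExtractLuiOri(uint32_t lui, uint32_t ori) {
        return ((lui & 0xffff) << 16) | (ori & 0xffff);
    }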
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
index 9fdbcda98..6988d23a5 100644
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -144,28 +144,15 @@ class Assembler : public AssemblerMIPSShared
     }
 
   public:
-    using AssemblerMIPSShared::bind;
-
-    void bind(RepatchLabel* label);
     void Bind(uint8_t* rawCode, CodeOffset* label, const void* address);
 
     static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
     static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
 
-    void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
-
-    // Copy the assembly code to the given buffer, and perform any pending
-    // relocations relying on the target address.
-    void executableCopy(uint8_t* buffer);
-
-    static uint32_t PatchWrite_NearCallSize();
-
+    static uint32_t InstructionImmediateSize() {
+        return 2 * sizeof(uint32_t);
+    }
     static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
-    static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1, uint32_t value);
-    static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
-                                        Register reg, uint32_t value);
-
-    static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
     static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                         ImmPtr expectedValue);
     static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
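The new InstructionImmediateSize() exposes the same invariant to shared code: a patchable immediate is the two-word lui/ori pair, i.e. 2 * sizeof(uint32_t) = 8 bytes, matching the AutoFlushICache::flush(..., 8) calls in the .cpp. A hedged sketch of the check-then-patch flow that PatchDataWithValueCheck implements, in terms of the helpers sketched above (FlushICache stands in for AutoFlushICache::flush):

    uint32_t old = ExtractLuiOri(*lui, *ori);
    MOZ_ASSERT(old == expected);             // verify the site before repointing
    UpdateLuiOri(lui, ori, newValue);        // rewrite the two imm16 fields
    FlushICache(lui, 2 * sizeof(uint32_t));  // make the 8 patched bytes visible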
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
index b947c14aa..f41c72f5b 100644
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -490,11 +490,11 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
     masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
-            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+            masm.ma_load_unaligned(mir->access(), output.low, BaseIndex(HeapReg, ptr, TimesOne),
                                    temp, static_cast<LoadStoreSize>(8 * byteSize),
                                    isSigned ? SignExtend : ZeroExtend);
             if (!isSigned)
@@ -502,12 +502,11 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
             else
                 masm.ma_sra(output.high, output.low, Imm32(31));
         } else {
-            ScratchRegisterScope scratch(masm);
-            masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                   temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
-            masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
-            masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
-                                   temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+            masm.ma_load_unaligned(mir->access(), output.low,
+                                   BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord, ZeroExtend);
+            masm.ma_load_unaligned(mir->access(), output.high,
+                                   BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
+                                   SizeWord, SignExtend);
         }
         return;
     }
@@ -515,15 +514,15 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
     if (byteSize <= 4) {
         masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
                      static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
         if (!isSigned)
             masm.move32(Imm32(0), output.high);
         else
             masm.ma_sra(output.high, output.low, Imm32(31));
     } else {
-        ScratchRegisterScope scratch(masm);
         masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
-        masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
-        masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+        masm.ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
+        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
     }
 
     masm.memoryBarrier(mir->access().barrierAfter());
@@ -577,20 +576,19 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
     masm.memoryBarrier(mir->access().barrierBefore());
 
     MOZ_ASSERT(INT64LOW_OFFSET == 0);
-    if (mir->access().isUnaligned()) {
+    if (IsUnaligned(mir->access())) {
         Register temp = ToRegister(lir->getTemp(1));
 
         if (byteSize <= 4) {
-            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+            masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
                                     temp, static_cast<LoadStoreSize>(8 * byteSize),
                                     isSigned ? SignExtend : ZeroExtend);
         } else {
-            ScratchRegisterScope scratch(masm);
-            masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
-                                    temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
-            masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
-            masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
-                                    temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+            masm.ma_store_unaligned(mir->access(), value.high,
+                                    BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
+                                    SizeWord, SignExtend);
+            masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
+                                    temp, SizeWord, ZeroExtend);
        }
         return;
     }
@@ -598,11 +596,12 @@
     if (byteSize <= 4) {
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
                       static_cast<LoadStoreSize>(8 * byteSize));
+        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+
     } else {
-        ScratchRegisterScope scratch(masm);
+        masm.ma_store(value.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
+        masm.append(mir->access(), masm.size() - 4, masm.framePushed());
         masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
-        masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
-        masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
     }
 
     masm.memoryBarrier(mir->access().barrierAfter());
diff --git a/js/src/jit/mips32/MacroAssembler-mips32-inl.h b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
index 2dae8fb87..a86cead29 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32-inl.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -302,8 +302,9 @@ MacroAssembler::neg64(Register64 reg)
 void
 MacroAssembler::mulBy3(Register src, Register dest)
 {
-    as_addu(dest, src, src);
-    as_addu(dest, dest, src);
+    MOZ_ASSERT(src != ScratchRegister);
+    as_addu(ScratchRegister, src, src);
+    as_addu(dest, ScratchRegister, src);
 }
 
 void
@@ -1042,7 +1043,7 @@ MacroAssembler::wasmPatchBoundsCheck(uint8_t* patchAt, uint32_t limit)
     InstImm* i1 = (InstImm*) i0->next();
 
     // Replace with new value
-    Assembler::UpdateLuiOriValue(i0, i1, limit);
+    AssemblerMIPSShared::UpdateLuiOriValue(i0, i1, limit);
 }
 
 //}}} check_macroassembler_style
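One fix above deserves a callout: the old mulBy3 computed dest = src + src and then dest = dest + src, which is wrong whenever dest aliases src, because the first addu clobbers src and the second one then doubles the already-doubled value. Routing the intermediate sum through ScratchRegister (with the new MOZ_ASSERT ruling out src == ScratchRegister) makes the aliased case safe. A plain C++ simulation of the two instruction sequences:

    #include <cassert>

    // Old sequence with dest aliasing src: one storage location for both names.
    static int mulBy3_old_aliased(int reg) {
        reg = reg + reg;  // as_addu(dest, src, src): 2*src, src is clobbered
        reg = reg + reg;  // as_addu(dest, dest, src): reads 2*src, yields 4*src
        return reg;
    }

    // New sequence: the first sum lands in a scratch, so src stays intact.
    static int mulBy3_new(int src, int dest) {
        int scratch = src + src;  // as_addu(ScratchRegister, src, src)
        dest = scratch + src;     // as_addu(dest, ScratchRegister, src)
        return dest;              // 3*src, even when dest aliases src
    }

    int main() {
        assert(mulBy3_old_aliased(5) == 20);  // the aliasing bug: 4*src
        assert(mulBy3_new(5, 5) == 15);       // correct: 3*src
        return 0;
    }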
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index 2b2fab92d..94aef3f4d 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -202,17 +202,6 @@ MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm)
     ma_li(dest, Imm32(uint32_t(imm.value)));
 }
 
-// This method generates lui and ori instruction pair that can be modified by
-// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
-// during execution (eg. jit::PatchJump).
-void
-MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
-{
-    m_buffer.ensureSpace(2 * sizeof(uint32_t));
-    as_lui(dest, Imm16::Upper(imm).encode());
-    as_ori(dest, dest, Imm16::Lower(imm).encode());
-}
-
 void
 MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
 {
@@ -514,122 +503,6 @@ MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label, Condition c,
     ma_b(SecondScratchReg, imm, label, c, jumpKind);
 }
 
-void
-MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill)
-{
-    if (label->bound()) {
-        // Generate the long jump for calls because return address has to be
-        // the address after the reserved block.
-        addLongJump(nextOffset());
-        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
-        as_jalr(ScratchRegister);
-        if (delaySlotFill == FillDelaySlot)
-            as_nop();
-        return;
-    }
-
-    // Second word holds a pointer to the next branch in label's chain.
-    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
-
-    // Make the whole branch continous in the buffer.
-    m_buffer.ensureSpace(4 * sizeof(uint32_t));
-
-    BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
-    writeInst(nextInChain);
-    if (!oom())
-        label->use(bo.getOffset());
-    // Leave space for long jump.
-    as_nop();
-    if (delaySlotFill == FillDelaySlot)
-        as_nop();
-}
-
-void
-MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label, JumpKind jumpKind)
-{
-    MOZ_ASSERT(code.encode() != InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
-    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
-
-    if (label->bound()) {
-        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
-
-        if (BOffImm16::IsInRange(offset))
-            jumpKind = ShortJump;
-
-        if (jumpKind == ShortJump) {
-            MOZ_ASSERT(BOffImm16::IsInRange(offset));
-            code.setBOffImm16(BOffImm16(offset));
-            writeInst(code.encode());
-            as_nop();
-            return;
-        }
-
-        if (code.encode() == inst_beq.encode()) {
-            // Handle long jump
-            addLongJump(nextOffset());
-            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
-            as_jr(ScratchRegister);
-            as_nop();
-            return;
-        }
-
-        // Handle long conditional branch
-        writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
-        // No need for a "nop" here because we can clobber scratch.
-        addLongJump(nextOffset());
-        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
-        as_jr(ScratchRegister);
-        as_nop();
-        return;
-    }
-
-    // Generate open jump and link it to a label.
-
-    // Second word holds a pointer to the next branch in label's chain.
-    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
-
-    if (jumpKind == ShortJump) {
-        // Make the whole branch continous in the buffer.
-        m_buffer.ensureSpace(2 * sizeof(uint32_t));
-
-        // Indicate that this is short jump with offset 4.
-        code.setBOffImm16(BOffImm16(4));
-        BufferOffset bo = writeInst(code.encode());
-        writeInst(nextInChain);
-        if (!oom())
-            label->use(bo.getOffset());
-        return;
-    }
-
-    bool conditional = code.encode() != inst_beq.encode();
-
-    // Make the whole branch continous in the buffer.
-    m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
-
-    BufferOffset bo = writeInst(code.encode());
-    writeInst(nextInChain);
-    if (!oom())
-        label->use(bo.getOffset());
-    // Leave space for potential long jump.
-    as_nop();
-    as_nop();
-    if (conditional)
-        as_nop();
-}
-
-void
-MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
-{
-    ma_lw(ScratchRegister, addr);
-    ma_cmp_set(rd, rs, ScratchRegister, c);
-}
-
-void
-MacroAssemblerMIPS::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
-{
-    ma_lw(ScratchRegister, lhs);
-    ma_cmp_set(dst, ScratchRegister, rhs, c);
-}
-
 // fp instructions
 void
@@ -928,26 +801,32 @@ MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
 }
 
 void
-MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
-                                              FloatRegister dest)
+MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+                                              const BaseIndex& src, Register temp, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
+    uint32_t framePushed = asMasm().framePushed();
+    BufferOffset load;
 
     if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
-        as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+        load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
         as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+        append(access, load.getOffset(), framePushed);
         moveToDoubleLo(temp, dest);
-        as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+        load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
         as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+        append(access, load.getOffset(), framePushed);
        moveToDoubleHi(temp, dest);
     } else {
         ma_li(ScratchRegister, Imm32(src.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
-        as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+        load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
         as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+        append(access, load.getOffset(), framePushed);
         moveToDoubleLo(temp, dest);
-        as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+        load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
         as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+        append(access, load.getOffset(), framePushed);
         moveToDoubleHi(temp, dest);
     }
 }
@@ -980,21 +859,21 @@ MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
 }
 
 void
-MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
-                                               FloatRegister dest)
+MacroAssemblerMIPSCompat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+                                               const BaseIndex& src, Register temp, FloatRegister dest)
 {
     computeScaledAddress(src, SecondScratchReg);
-
+    BufferOffset load;
     if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
-        as_lwl(temp, SecondScratchReg, src.offset + 3);
+        load = as_lwl(temp, SecondScratchReg, src.offset + 3);
         as_lwr(temp, SecondScratchReg, src.offset);
     } else {
         ma_li(ScratchRegister, Imm32(src.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
-        as_lwl(temp, ScratchRegister, 3);
+        load = as_lwl(temp, ScratchRegister, 3);
         as_lwr(temp, ScratchRegister, 0);
     }
-
+    append(access, load.getOffset(), asMasm().framePushed());
     moveToFloat32(temp, dest);
 }
@@ -1132,46 +1011,52 @@ MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
 }
 
 void
-MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
-                                                const BaseIndex& dest)
+MacroAssemblerMIPSCompat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+                                                FloatRegister src, Register temp, const BaseIndex& dest)
 {
     computeScaledAddress(dest, SecondScratchReg);
     moveFromFloat32(src, temp);
+    BufferOffset store;
 
     if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
-        as_swl(temp, SecondScratchReg, dest.offset + 3);
+        store = as_swl(temp, SecondScratchReg, dest.offset + 3);
         as_swr(temp, SecondScratchReg, dest.offset);
     } else {
         ma_li(ScratchRegister, Imm32(dest.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
-        as_swl(temp, ScratchRegister, 3);
+        store = as_swl(temp, ScratchRegister, 3);
         as_swr(temp, ScratchRegister, 0);
     }
+    append(access, store.getOffset(), asMasm().framePushed());
 }
 
 void
-MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
-                                               const BaseIndex& dest)
+MacroAssemblerMIPSCompat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+                                               FloatRegister src, Register temp, const BaseIndex& dest)
 {
     computeScaledAddress(dest, SecondScratchReg);
+    uint32_t framePushed = asMasm().framePushed();
+    BufferOffset store;
 
     if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+        moveFromDoubleHi(src, temp);
+        store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+        as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
         moveFromDoubleLo(src, temp);
         as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
         as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
-        moveFromDoubleHi(src, temp);
-        as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
-        as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+
     } else {
         ma_li(ScratchRegister, Imm32(dest.offset));
         as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+        moveFromDoubleHi(src, temp);
+        store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+        as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
         moveFromDoubleLo(src, temp);
         as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
         as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
-        moveFromDoubleHi(src, temp);
-        as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
-        as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
     }
+    append(access, store.getOffset(), framePushed);
 }
 
 // Note: this function clobbers the input register.
@@ -1608,13 +1493,12 @@ MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel* label, Label* documentatio
 {
     // Only one branch per label.
     MOZ_ASSERT(!label->used());
-    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
 
     BufferOffset bo = nextOffset();
     label->use(bo.getOffset());
-    addLongJump(bo);
-    ma_liPatchable(ScratchRegister, Imm32(dest));
-    as_jr(ScratchRegister);
+    if (label->bound())
+        addMixedJump(bo, label->offset(), MixedJumpPatch::PATCHABLE);
+    as_j(JOffImm26(0));
     as_nop();
     return CodeOffsetJump(bo.getOffset());
 }
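A pattern recurs through the MacroAssembler-mips32.cpp hunks above: an unaligned 32-bit access is an lwl/lwr (or swl/swr) pair, and the BufferOffset returned by the first instruction of the pair is now recorded with append(access, offset, framePushed), presumably so the wasm machinery can map a faulting pc back to its MemoryAccessDesc (the append overload itself lives outside this diff). The core shape, using the same calls as the code above (SecondScratchReg already holds the computed address; offsets follow the lwl-at-offset+3 / lwr-at-offset convention used throughout this file):

    BufferOffset load = as_lwl(temp, SecondScratchReg, offset + 3);
    as_lwr(temp, SecondScratchReg, offset);
    // Only the first instruction of the pair is registered as the access
    // point; the diff consistently records the lwl/swl side.
    append(access, load.getOffset(), asMasm().framePushed());

This is plausibly also why storeUnalignedDouble now writes the high word first: the one offset recorded for the access then points at the first of the four stores.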
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
index adb626bb0..5cec70430 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -46,6 +46,7 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
   public:
     using MacroAssemblerMIPSShared::ma_b;
     using MacroAssemblerMIPSShared::ma_li;
+    using MacroAssemblerMIPSShared::ma_liPatchable;
     using MacroAssemblerMIPSShared::ma_ss;
     using MacroAssemblerMIPSShared::ma_sd;
     using MacroAssemblerMIPSShared::ma_load;
@@ -55,7 +56,6 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
 
     void ma_li(Register dest, CodeOffset* label);
 
-    void ma_liPatchable(Register dest, Imm32 imm);
     void ma_li(Register dest, ImmWord imm);
     void ma_liPatchable(Register dest, ImmPtr imm);
     void ma_liPatchable(Register dest, ImmWord imm);
@@ -89,28 +89,25 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
     void ma_pop(Register r);
     void ma_push(Register r);
 
-    void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
     // branches when done from within mips-specific code
-    void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+    void ma_b(Register lhs, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
     {
         ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
     }
-    void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = LongJump)
+    void ma_b(Address addr, ImmWord imm, Label* l, Condition c, JumpKind jumpKind = MixedJump)
    {
         ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
     }
-    void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = LongJump);
-    void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
-    void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
-    void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump) {
+    void ma_b(Register lhs, Address addr, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+    void ma_b(Address addr, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+    void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
+    void ma_b(Address addr, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
         MOZ_ASSERT(rhs != ScratchRegister);
-        ma_load(ScratchRegister, addr, SizeWord);
+        ma_lw(ScratchRegister, addr);
         ma_b(ScratchRegister, rhs, l, c, jumpKind);
     }
 
-    void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
-
     // fp instructions
     void ma_lid(FloatRegister dest, double value);
 
@@ -128,12 +125,19 @@ class MacroAssemblerMIPS : public MacroAssemblerMIPSShared
     void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
         ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
     }
-    void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
-    void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c);
-    void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+    void ma_cmp_set(Register dst, Register lhs, Address addr, Condition c) {
+        MOZ_ASSERT(lhs != ScratchRegister);
+        ma_lw(ScratchRegister, addr);
+        ma_cmp_set(dst, lhs, ScratchRegister, c);
+    }
+    void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c) {
+        MOZ_ASSERT(rhs != ScratchRegister);
         ma_lw(ScratchRegister, lhs);
-        ma_li(SecondScratchReg, Imm32(uint32_t(imm.value)));
-        ma_cmp_set(dst, ScratchRegister, SecondScratchReg, c);
+        ma_cmp_set(dst, ScratchRegister, rhs, c);
+    }
+    void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+        ma_lw(SecondScratchReg, lhs);
+        ma_cmp_set(dst, SecondScratchReg, imm, c);
     }
 
     // These fuctions abstract the access to high part of the double precision
@@ -893,7 +897,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
     void loadDouble(const Address& addr, FloatRegister dest);
     void loadDouble(const BaseIndex& src, FloatRegister dest);
 
-    void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+    void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                             Register temp, FloatRegister dest);
 
     // Load a float value into a register, then expand it to a double.
     void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@@ -901,7 +906,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
     void loadFloat32(const Address& addr, FloatRegister dest);
     void loadFloat32(const BaseIndex& src, FloatRegister dest);
 
-    void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+    void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+                              Register temp, FloatRegister dest);
 
     void store8(Register src, const Address& address);
     void store8(Imm32 imm, const Address& address);
@@ -942,8 +948,10 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
     void storePtr(Register src, const BaseIndex& address);
     void storePtr(Register src, AbsoluteAddress dest);
 
-    void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
-    void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+    void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
+                               Register temp, const BaseIndex& dest);
+    void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
+                              Register temp, const BaseIndex& dest);
 
     void moveDouble(FloatRegister src, FloatRegister dest) {
         as_movd(dest, src);
@@ -990,12 +998,6 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
         ma_sw(imm, addr);
     }
 
-    BufferOffset ma_BoundsCheck(Register bounded) {
-        BufferOffset bo = m_buffer.nextOffset();
-        ma_liPatchable(bounded, ImmWord(0));
-        return bo;
-    }
-
     void moveFloat32(FloatRegister src, FloatRegister dest) {
         as_movs(dest, src);
     }
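A closing note on the ma_cmp_set rewrite in this header: each overload now loads its memory operand into the scratch register that the overload it forwards to will not use. The Register/Address and Address/Register forms assert that the caller's register is not ScratchRegister and load into ScratchRegister, while the Address/ImmPtr form loads into SecondScratchReg, presumably because the Imm32 overload it ultimately forwards to is free to materialize the immediate in ScratchRegister (that overload lives in the shared macroassembler, outside this diff). Annotated, the new forwarding chain:

    void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
        ma_lw(SecondScratchReg, lhs);              // keep ScratchRegister free
        ma_cmp_set(dst, SecondScratchReg, imm, c); // ImmPtr -> Imm32 form, which
    }                                              // may clobber ScratchRegister

The removed out-of-line definitions did both loads by hand; delegating to the shared Imm32 overload keeps immediate materialization in one place.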