author     Jiaxun Yang <jiaxun.yang@flygoat.com>   2020-05-12 12:40:14 +0800
committer  Jiaxun Yang <jiaxun.yang@flygoat.com>   2020-05-14 16:31:59 +0800
commit     32a4d5c41609ebe01ce08285f8e4996403199bbc (patch)
tree       acdd61b9bcbf7c9a7a438e3819ecbcd0e798e0df /js
parent     77544de5d66a8bf403a9e9db4c0eb7b98c437a65 (diff)
Bug 1412030 - [MIPS] Emit wasm memory access information
Tag: #1542
Diffstat (limited to 'js')
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.h          10
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp    45
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp   36
-rw-r--r--  js/src/jit/mips-shared/MacroAssembler-mips-shared.h      4
-rw-r--r--  js/src/jit/mips32/CodeGenerator-mips32.cpp              41
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.cpp             60
-rw-r--r--  js/src/jit/mips32/MacroAssembler-mips32.h               12
-rw-r--r--  js/src/jit/mips64/CodeGenerator-mips64.cpp               6
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp             45
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.h               12
10 files changed, 164 insertions, 107 deletions
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
index 4cfb30117..1640fdafc 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -1550,7 +1550,15 @@ class InstGS : public Instruction
inline bool
IsUnaligned(const wasm::MemoryAccessDesc& access)
{
- return access.align() && access.align() < access.byteSize();
+ if (!access.align())
+ return false;
+
+#ifdef JS_CODEGEN_MIPS32
+ if (access.type() == Scalar::Int64 && access.align() >= 4)
+ return false;
+#endif
+
+ return access.align() < access.byteSize();
}
} // namespace jit
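
As an illustration (not part of the patch), here is a minimal standalone sketch of the revised IsUnaligned() predicate. MemoryAccessDesc and ScalarType below are simplified stand-ins for the real wasm::MemoryAccessDesc and Scalar types, and the MIPS32 special case reflects the fact that the mips32 code generator further down performs a 4-byte-aligned Int64 access as two naturally aligned word accesses.

#include <cassert>
#include <cstdint>

enum class ScalarType { Int32, Int64, Float32, Float64 };

struct MemoryAccessDesc {
    uint32_t align_;
    uint32_t byteSize_;
    ScalarType type_;
    uint32_t align() const { return align_; }
    uint32_t byteSize() const { return byteSize_; }
    ScalarType type() const { return type_; }
};

// Mirrors the patched predicate: an access reporting zero alignment is not
// treated as unaligned (unchanged from the old code), and on MIPS32 an Int64
// access that is at least 4-byte aligned takes the aligned path because it is
// split into two aligned word accesses anyway.
bool IsUnaligned(const MemoryAccessDesc& access, bool mips32)
{
    if (!access.align())
        return false;
    if (mips32 && access.type() == ScalarType::Int64 && access.align() >= 4)
        return false;
    return access.align() < access.byteSize();
}

int main()
{
    assert(!IsUnaligned({4, 8, ScalarType::Int64}, /*mips32=*/true));   // two aligned words
    assert( IsUnaligned({4, 8, ScalarType::Int64}, /*mips32=*/false));  // one 8-byte access
    assert( IsUnaligned({1, 4, ScalarType::Int32}, /*mips32=*/true));
    assert(!IsUnaligned({0, 4, ScalarType::Int32}, /*mips32=*/true));   // zero alignment
    return 0;
}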
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
index 2526fc237..d43dbbf38 100644
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1919,12 +1919,14 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
+ FloatRegister output = ToFloatRegister(lir->output());
+
if (byteSize == 4)
- masm.loadUnalignedFloat32(address, temp, ToFloatRegister(lir->output()));
+ masm.loadUnalignedFloat32(mir->access(), address, temp, output);
else
- masm.loadUnalignedDouble(address, temp, ToFloatRegister(lir->output()));
+ masm.loadUnalignedDouble(mir->access(), address, temp, output);
} else {
- masm.ma_load_unaligned(ToRegister(lir->output()), address, temp,
+ masm.ma_load_unaligned(mir->access(), ToRegister(lir->output()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@@ -1934,16 +1936,20 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
}
if (isFloat) {
- if (byteSize == 4)
- masm.loadFloat32(address, ToFloatRegister(lir->output()));
- else
- masm.loadDouble(address, ToFloatRegister(lir->output()));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (byteSize == 4) {
+ masm.loadFloat32(address, output);
+ } else {
+ masm.computeScaledAddress(address, SecondScratchReg);
+ masm.as_ld(output, SecondScratchReg, 0);
+ }
} else {
masm.ma_load(ToRegister(lir->output()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
-
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@@ -2004,12 +2010,14 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
+ FloatRegister value = ToFloatRegister(lir->value());
+
if (byteSize == 4)
- masm.storeUnalignedFloat32(ToFloatRegister(lir->value()), temp, address);
+ masm.storeUnalignedFloat32(mir->access(), value, temp, address);
else
- masm.storeUnalignedDouble(ToFloatRegister(lir->value()), temp, address);
+ masm.storeUnalignedDouble(mir->access(), value, temp, address);
} else {
- masm.ma_store_unaligned(ToRegister(lir->value()), address, temp,
+ masm.ma_store_unaligned(mir->access(), ToRegister(lir->value()), address, temp,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
@@ -2019,16 +2027,23 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
}
if (isFloat) {
+ FloatRegister value = ToFloatRegister(lir->value());
+
if (byteSize == 4) {
- masm.storeFloat32(ToFloatRegister(lir->value()), address);
- } else
- masm.storeDouble(ToFloatRegister(lir->value()), address);
+ masm.storeFloat32(value, address);
+ } else {
+                // For the time being, storeDouble for mips32 uses two store instructions,
+                // so we emit only one to get correct behavior in case of OOB access.
+ masm.computeScaledAddress(address, SecondScratchReg);
+ masm.as_sd(value, SecondScratchReg, 0);
+ }
} else {
masm.ma_store(ToRegister(lir->value()), address,
static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
}
-
+ // Only the last emitted instruction is a memory access.
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
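
Aside (not part of the patch): the pattern used by these hunks is to call masm.append(mir->access(), masm.size() - 4, masm.framePushed()) right after the instruction that actually touches wasm memory; every MIPS instruction is 4 bytes, so masm.size() - 4 is the buffer offset of the instruction just emitted. The sketch below models that bookkeeping with simplified stand-in types (MiniAssembler and MemoryAccess are illustrative, not the real SpiderMonkey classes).

#include <cstdint>
#include <iostream>
#include <vector>

struct MemoryAccess {            // models wasm::MemoryAccess: where a heap access lives in the code
    uint32_t insnOffset;
    uint32_t framePushed;
};

class MiniAssembler {
    std::vector<uint32_t> code_;          // one 32-bit word per MIPS instruction
    std::vector<MemoryAccess> accesses_;  // metadata later consumed by the wasm runtime
  public:
    void emit(uint32_t insn) { code_.push_back(insn); }
    uint32_t size() const { return static_cast<uint32_t>(code_.size() * 4); }
    void append(uint32_t offset, uint32_t framePushed) { accesses_.push_back({offset, framePushed}); }
    const std::vector<MemoryAccess>& accesses() const { return accesses_; }
};

int main()
{
    MiniAssembler masm;
    const uint32_t framePushed = 16;

    masm.emit(0x00000000);   // nop standing in for the address computation
    masm.emit(0x8c820000);   // lw v0, 0(a0): the one instruction that accesses memory
    // Only the last emitted instruction is the memory access, so record its
    // offset exactly as the patch does: size() - 4.
    masm.append(masm.size() - 4, framePushed);

    for (const MemoryAccess& a : masm.accesses())
        std::cout << "heap access at code offset " << a.insnOffset
                  << ", framePushed " << a.framePushed << "\n";
    return 0;
}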
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
index 3665f3072..0676863b9 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -463,7 +463,7 @@ MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
}
void
-MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+MacroAssemblerMIPSShared::ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
@@ -477,36 +477,41 @@ MacroAssemblerMIPSShared::ma_load_unaligned(Register dest, const BaseIndex& src,
hiOffset = Imm16(src.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(src.offset));
- as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
+ BufferOffset load;
switch (size) {
case SizeHalfWord:
- as_lbu(dest, base, lowOffset);
if (extension != ZeroExtend)
- as_lbu(temp, base, hiOffset);
+ load = as_lbu(temp, base, hiOffset);
else
- as_lb(temp, base, hiOffset);
+ load = as_lb(temp, base, hiOffset);
+ as_lbu(dest, base, lowOffset);
as_ins(dest, temp, 8, 24);
break;
case SizeWord:
- as_lwl(dest, base, hiOffset);
+ load = as_lwl(dest, base, hiOffset);
as_lwr(dest, base, lowOffset);
#ifdef JS_CODEGEN_MIPS64
if (extension != ZeroExtend)
as_dext(dest, dest, 0, 32);
#endif
break;
+#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
- as_ldl(dest, base, hiOffset);
+ load = as_ldl(dest, base, hiOffset);
as_ldr(dest, base, lowOffset);
break;
+#endif
default:
MOZ_CRASH("Invalid argument for ma_load");
}
+
+ append(access, load.getOffset(), asMasm().framePushed());
}
void
@@ -610,7 +615,7 @@ MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
}
void
-MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+MacroAssemblerMIPSShared::ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension)
{
int16_t lowOffset, hiOffset;
@@ -624,29 +629,34 @@ MacroAssemblerMIPSShared::ma_store_unaligned(Register data, const BaseIndex& des
hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
- as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ asMasm().addPtr(SecondScratchReg, ScratchRegister);
base = ScratchRegister;
lowOffset = Imm16(0).encode();
hiOffset = Imm16(size / 8 - 1).encode();
}
+ BufferOffset store;
switch (size) {
case SizeHalfWord:
- as_sb(data, base, lowOffset);
as_ext(temp, data, 8, 8);
- as_sb(temp, base, hiOffset);
+ store = as_sb(temp, base, hiOffset);
+ as_sb(data, base, lowOffset);
break;
case SizeWord:
- as_swl(data, base, hiOffset);
+ store = as_swl(data, base, hiOffset);
as_swr(data, base, lowOffset);
break;
+#ifdef JS_CODEGEN_MIPS64
case SizeDouble:
- as_sdl(data, base, hiOffset);
+ store = as_sdl(data, base, hiOffset);
as_sdr(data, base, lowOffset);
break;
+#endif
default:
MOZ_CRASH("Invalid argument for ma_store");
}
+
+ append(access, store.getOffset(), asMasm().framePushed());
}
void
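
For context (again, not part of the patch): metadata of this kind is what lets the wasm runtime recognize a faulting program counter as a heap access and turn the hardware fault into a wasm trap instead of a crash. The lookup below is only an illustrative sketch of that idea; the names and the exact mechanism are assumptions, not the actual SpiderMonkey signal-handler code.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct MemoryAccess { uint32_t insnOffset; uint32_t framePushed; };

// Accesses are appended in emission order, so the vector is already sorted by offset.
const MemoryAccess* LookupMemoryAccess(const std::vector<MemoryAccess>& accesses,
                                       uint32_t faultOffset)
{
    auto it = std::lower_bound(accesses.begin(), accesses.end(), faultOffset,
                               [](const MemoryAccess& a, uint32_t off) { return a.insnOffset < off; });
    if (it != accesses.end() && it->insnOffset == faultOffset)
        return &*it;
    return nullptr;   // fault did not come from a recorded wasm heap access
}

int main()
{
    std::vector<MemoryAccess> accesses = {{8, 16}, {24, 16}, {40, 32}};
    assert(LookupMemoryAccess(accesses, 24) != nullptr);   // recorded access
    assert(LookupMemoryAccess(accesses, 28) == nullptr);   // unrelated fault
    return 0;
}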
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
index cd75b2e37..4f5dcd309 100644
--- a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -108,7 +108,7 @@ class MacroAssemblerMIPSShared : public Assembler
// load
void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
- void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
+ void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// store
@@ -116,7 +116,7 @@ class MacroAssemblerMIPSShared : public Assembler
LoadStoreExtension extension = SignExtend);
void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
LoadStoreExtension extension = SignExtend);
- void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
+ void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
LoadStoreSize size, LoadStoreExtension extension);
// arithmetic based ops
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
index 33fea01ae..f41c72f5b 100644
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -494,7 +494,7 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
- masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_load_unaligned(mir->access(), output.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
if (!isSigned)
@@ -502,12 +502,11 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
else
masm.ma_sra(output.high, output.low, Imm32(31));
} else {
- ScratchRegisterScope scratch(masm);
- masm.ma_load_unaligned(output.low, BaseIndex(HeapReg, ptr, TimesOne),
- temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
- masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
- masm.ma_load_unaligned(output.high, BaseIndex(HeapReg, scratch, TimesOne),
- temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_load_unaligned(mir->access(), output.low,
+ BaseIndex(HeapReg, ptr, TimesOne), temp, SizeWord, ZeroExtend);
+ masm.ma_load_unaligned(mir->access(), output.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
+ SizeWord, SignExtend);
}
return;
}
@@ -515,15 +514,15 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
if (byteSize <= 4) {
masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ masm.append(mir->access(), masm.size() - 4 , masm.framePushed());
if (!isSigned)
masm.move32(Imm32(0), output.high);
else
masm.ma_sra(output.high, output.low, Imm32(31));
} else {
- ScratchRegisterScope scratch(masm);
- masm.ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
- masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
- masm.ma_load(output.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
+ masm.append(mir->access(), masm.size() - 4 , masm.framePushed());
+ masm.ma_load(output.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
+ masm.append(mir->access(), masm.size() - 4 , masm.framePushed());
}
masm.memoryBarrier(mir->access().barrierAfter());
@@ -581,16 +580,15 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
- masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
} else {
- ScratchRegisterScope scratch(masm);
- masm.ma_store_unaligned(value.low, BaseIndex(HeapReg, ptr, TimesOne),
- temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
- masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
- masm.ma_store_unaligned(value.high, BaseIndex(HeapReg, scratch, TimesOne),
- temp, SizeWord, isSigned ? SignExtend : ZeroExtend);
+ masm.ma_store_unaligned(mir->access(), value.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), temp,
+ SizeWord, SignExtend);
+ masm.ma_store_unaligned(mir->access(), value.low, BaseIndex(HeapReg, ptr, TimesOne),
+ temp, SizeWord, ZeroExtend);
}
return;
}
@@ -598,11 +596,12 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
if (byteSize <= 4) {
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize));
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
+
} else {
- ScratchRegisterScope scratch(masm);
+ masm.ma_store(value.high, BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), SizeWord);
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.ma_store(value.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
- masm.ma_addu(scratch, ptr, Imm32(INT64HIGH_OFFSET));
- masm.ma_store(value.high, BaseIndex(HeapReg, scratch, TimesOne), SizeWord);
}
masm.memoryBarrier(mir->access().barrierAfter());
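
To illustrate the MIPS32 split (not part of the patch): an Int64 wasm load or store is performed as two 32-bit accesses at INT64LOW_OFFSET and INT64HIGH_OFFSET from the same base address, and the hunks above record one metadata entry per word access. The sketch below shows the split itself; the offset constants assume a little-endian layout purely for illustration (the real constants are endian-dependent and come from the jit headers), and the byte array stands in for wasm linear memory.

#include <cassert>
#include <cstdint>
#include <cstring>

constexpr uint32_t INT64LOW_OFFSET  = 0;   // assumption: little-endian layout
constexpr uint32_t INT64HIGH_OFFSET = 4;

// Store a 64-bit value as two 32-bit halves, the way the MIPS32 path above
// emits two word stores (each of which gets its own MemoryAccess entry).
void storeI64Split(uint8_t* heap, uint32_t ptr, uint64_t value)
{
    uint32_t lo = static_cast<uint32_t>(value);
    uint32_t hi = static_cast<uint32_t>(value >> 32);
    std::memcpy(heap + ptr + INT64LOW_OFFSET,  &lo, sizeof lo);
    std::memcpy(heap + ptr + INT64HIGH_OFFSET, &hi, sizeof hi);
}

uint64_t loadI64Split(const uint8_t* heap, uint32_t ptr)
{
    uint32_t lo, hi;
    std::memcpy(&lo, heap + ptr + INT64LOW_OFFSET,  sizeof lo);
    std::memcpy(&hi, heap + ptr + INT64HIGH_OFFSET, sizeof hi);
    return (static_cast<uint64_t>(hi) << 32) | lo;
}

int main()
{
    uint8_t heap[16] = {};
    storeI64Split(heap, 8, 0x0123456789abcdefULL);
    assert(loadI64Split(heap, 8) == 0x0123456789abcdefULL);
    return 0;
}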
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
index 7472ed78b..94aef3f4d 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -801,26 +801,32 @@ MacroAssemblerMIPSCompat::loadDouble(const BaseIndex& src, FloatRegister dest)
}
void
-MacroAssemblerMIPSCompat::loadUnalignedDouble(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPSCompat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
+ uint32_t framePushed = asMasm().framePushed();
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
- as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+ load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+ append(access, load.getOffset(), framePushed);
moveToDoubleLo(temp, dest);
- as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+ load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+ append(access, load.getOffset(), framePushed);
moveToDoubleHi(temp, dest);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+ load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+ append(access, load.getOffset(), framePushed);
moveToDoubleLo(temp, dest);
- as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+ append(access, load.getOffset(), framePushed);
moveToDoubleHi(temp, dest);
}
}
@@ -853,21 +859,21 @@ MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex& src, FloatRegister dest)
}
void
-MacroAssemblerMIPSCompat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPSCompat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
-
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
- as_lwl(temp, SecondScratchReg, src.offset + 3);
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
as_lwr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_lwl(temp, ScratchRegister, 3);
+ load = as_lwl(temp, ScratchRegister, 3);
as_lwr(temp, ScratchRegister, 0);
}
-
+ append(access, load.getOffset(), asMasm().framePushed());
moveToFloat32(temp, dest);
}
@@ -1005,46 +1011,52 @@ MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest)
}
void
-MacroAssemblerMIPSCompat::storeUnalignedFloat32(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPSCompat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromFloat32(src, temp);
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
- as_swl(temp, SecondScratchReg, dest.offset + 3);
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
as_swr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_swl(temp, ScratchRegister, 3);
+ store = as_swl(temp, ScratchRegister, 3);
as_swr(temp, ScratchRegister, 0);
}
+ append(access, store.getOffset(), asMasm().framePushed());
}
void
-MacroAssemblerMIPSCompat::storeUnalignedDouble(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPSCompat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
+ uint32_t framePushed = asMasm().framePushed();
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
+ moveFromDoubleHi(src, temp);
+ store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+ as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
moveFromDoubleLo(src, temp);
as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
- moveFromDoubleHi(src, temp);
- as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
- as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ moveFromDoubleHi(src, temp);
+ store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+ as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
moveFromDoubleLo(src, temp);
as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
- moveFromDoubleHi(src, temp);
- as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
- as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
}
+ append(access, store.getOffset(), framePushed);
}
// Note: this function clobbers the input register.
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
index 37d279b09..5cec70430 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -897,7 +897,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@@ -905,7 +906,8 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
void store8(Register src, const Address& address);
void store8(Imm32 imm, const Address& address);
@@ -946,8 +948,10 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
- void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
- void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
void moveDouble(FloatRegister src, FloatRegister dest) {
as_movd(dest, src);
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
index d1b6a7a32..862960bdf 100644
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -452,7 +452,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
- masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_load_unaligned(mir->access(), ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
@@ -460,6 +460,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.ma_load(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
@@ -517,13 +518,14 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
- masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
+ masm.ma_store_unaligned(mir->access(), ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
temp, static_cast<LoadStoreSize>(8 * byteSize),
isSigned ? SignExtend : ZeroExtend);
return;
}
masm.ma_store(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
static_cast<LoadStoreSize>(8 * byteSize), isSigned ? SignExtend : ZeroExtend);
+ masm.append(mir->access(), masm.size() - 4, masm.framePushed());
masm.memoryBarrier(mir->access().barrierAfter());
}
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
index 9604e38a4..424e04306 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.cpp
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -1049,21 +1049,21 @@ MacroAssemblerMIPS64Compat::loadDouble(const BaseIndex& src, FloatRegister dest)
}
void
-MacroAssemblerMIPS64Compat::loadUnalignedDouble(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPS64Compat::loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
-
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 7)) {
- as_ldl(temp, SecondScratchReg, src.offset + 7);
+ load = as_ldl(temp, SecondScratchReg, src.offset + 7);
as_ldr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_ldl(temp, ScratchRegister, 7);
+ load = as_ldl(temp, ScratchRegister, 7);
as_ldr(temp, ScratchRegister, 0);
}
-
+ append(access, load.getOffset(), asMasm().framePushed());
moveToDouble(temp, dest);
}
@@ -1095,21 +1095,21 @@ MacroAssemblerMIPS64Compat::loadFloat32(const BaseIndex& src, FloatRegister dest
}
void
-MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const BaseIndex& src, Register temp,
- FloatRegister dest)
+MacroAssemblerMIPS64Compat::loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp, FloatRegister dest)
{
computeScaledAddress(src, SecondScratchReg);
-
+ BufferOffset load;
if (Imm16::IsInSignedRange(src.offset) && Imm16::IsInSignedRange(src.offset + 3)) {
- as_lwl(temp, SecondScratchReg, src.offset + 3);
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
as_lwr(temp, SecondScratchReg, src.offset);
} else {
ma_li(ScratchRegister, Imm32(src.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_lwl(temp, ScratchRegister, 3);
+ load = as_lwl(temp, ScratchRegister, 3);
as_lwr(temp, ScratchRegister, 0);
}
-
+ append(access, load.getOffset(), asMasm().framePushed());
moveToFloat32(temp, dest);
}
@@ -1251,39 +1251,42 @@ MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest)
}
void
-MacroAssemblerMIPS64Compat::storeUnalignedFloat32(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPS64Compat::storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromFloat32(src, temp);
-
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 3)) {
- as_swl(temp, SecondScratchReg, dest.offset + 3);
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
as_swr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_swl(temp, ScratchRegister, 3);
+ store = as_swl(temp, ScratchRegister, 3);
as_swr(temp, ScratchRegister, 0);
}
+ append(access, store.getOffset(), asMasm().framePushed());
}
void
-MacroAssemblerMIPS64Compat::storeUnalignedDouble(FloatRegister src, Register temp,
- const BaseIndex& dest)
+MacroAssemblerMIPS64Compat::storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp, const BaseIndex& dest)
{
computeScaledAddress(dest, SecondScratchReg);
moveFromDouble(src, temp);
+ BufferOffset store;
if (Imm16::IsInSignedRange(dest.offset) && Imm16::IsInSignedRange(dest.offset + 7)) {
- as_sdl(temp, SecondScratchReg, dest.offset + 7);
+ store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
as_sdr(temp, SecondScratchReg, dest.offset);
} else {
ma_li(ScratchRegister, Imm32(dest.offset));
as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
- as_sdl(temp, ScratchRegister, 7);
+ store = as_sdl(temp, ScratchRegister, 7);
as_sdr(temp, ScratchRegister, 0);
}
+ append(access, store.getOffset(), asMasm().framePushed());
}
// Note: this function clobbers the input register.
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
index 4991c264f..b50ee7978 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -907,7 +907,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedDouble(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
// Load a float value into a register, then expand it to a double.
void loadFloatAsDouble(const Address& addr, FloatRegister dest);
@@ -915,7 +916,8 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadFloat32(const Address& addr, FloatRegister dest);
void loadFloat32(const BaseIndex& src, FloatRegister dest);
- void loadUnalignedFloat32(const BaseIndex& src, Register temp, FloatRegister dest);
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
+ Register temp, FloatRegister dest);
void store8(Register src, const Address& address);
void store8(Imm32 imm, const Address& address);
@@ -954,8 +956,10 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void storePtr(Register src, const BaseIndex& address);
void storePtr(Register src, AbsoluteAddress dest);
- void storeUnalignedFloat32(FloatRegister src, Register temp, const BaseIndex& dest);
- void storeUnalignedDouble(FloatRegister src, Register temp, const BaseIndex& dest);
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access, FloatRegister src,
+ Register temp, const BaseIndex& dest);
void moveDouble(FloatRegister src, FloatRegister dest) {
as_movd(dest, src);